/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Header file for the BFQ I/O scheduler: data structures and
 * prototypes of interface functions among BFQ components.
 */
#ifndef _BFQ_H
#define _BFQ_H

#include <linux/blktrace_api.h>
#include <linux/hrtimer.h>
#include <linux/blk-cgroup.h>

#include "blk-cgroup-rwstat.h"

#define BFQ_IOPRIO_CLASSES	3
#define BFQ_CL_IDLE_TIMEOUT	(HZ/5)

#define BFQ_MIN_WEIGHT			1
#define BFQ_MAX_WEIGHT			1000
#define BFQ_WEIGHT_CONVERSION_COEFF	10

#define BFQ_DEFAULT_QUEUE_IOPRIO	4

#define BFQ_WEIGHT_LEGACY_DFL	100
#define BFQ_DEFAULT_GRP_IOPRIO	0
#define BFQ_DEFAULT_GRP_CLASS	IOPRIO_CLASS_BE
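
/*
 * Illustrative note (added for clarity, not in the original header):
 * per-process ioprios are mapped to weights via
 * BFQ_WEIGHT_CONVERSION_COEFF; bfq_ioprio_to_weight(), declared below,
 * computes (assuming the legacy 0-7 ioprio range):
 *
 *	weight = (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF
 *
 * so the default queue ioprio 4 maps to weight (8 - 4) * 10 = 40,
 * while cgroups get BFQ_WEIGHT_LEGACY_DFL = 100 as their default.
 */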

#define MAX_PID_STR_LENGTH	12

/*
 * Soft real-time applications are far more latency-sensitive than
 * interactive ones. Over-raise the weight of the former to privilege
 * them against the latter.
 */
#define BFQ_SOFTRT_WEIGHT_FACTOR	100
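
/*
 * Worked example (illustrative; the default value below is taken from
 * bfq_init_queue() in bfq-iosched.c and is an assumption of this
 * note): with the default weight-raising coefficient bfq_wr_coeff ==
 * 30, a queue detected as soft real-time is weight-raised by a factor
 * of 30 * BFQ_SOFTRT_WEIGHT_FACTOR == 3000, i.e., two orders of
 * magnitude more than an interactive queue.
 */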

struct bfq_entity;

/**
 * struct bfq_service_tree - per ioprio_class service tree.
 *
 * Each service tree represents a B-WF2Q+ scheduler on its own. Each
 * ioprio_class has its own independent scheduler, and so its own
 * bfq_service_tree. All the fields are protected by the queue lock
 * of the containing bfqd.
 */
struct bfq_service_tree {
	/* tree for active entities (i.e., those backlogged) */
	struct rb_root active;
	/* tree for idle entities (i.e., not backlogged, with V < F_i) */
	struct rb_root idle;

	/* idle entity with minimum F_i */
	struct bfq_entity *first_idle;
	/* idle entity with maximum F_i */
	struct bfq_entity *last_idle;

	/* scheduler virtual time */
	u64 vtime;
	/* scheduler weight sum; active and idle entities contribute to it */
	unsigned long wsum;
};
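
/*
 * Illustrative note on B-WF2Q+ timestamps (added for clarity, not in
 * the original header): timestamps are expressed in sectors/weight,
 * and an entity's finish time is F_i = S_i + budget/weight (see
 * struct bfq_entity below). E.g., an entity with a budget of 8192
 * sectors and weight 40 that starts at S_i = vtime gets
 * F_i = vtime + 8192/40; among the entities whose start time is not
 * in the future, the one with the smallest F_i is served first.
 */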

/**
 * struct bfq_sched_data - multi-class scheduler.
 *
 * bfq_sched_data is the basic scheduler queue. It supports three
 * ioprio_classes, and can be used either as a toplevel queue or as an
 * intermediate queue in a hierarchical setup.
 *
 * The supported ioprio_classes are the same as in CFQ, in descending
 * priority order: IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
 * Requests from higher-priority queues are served before all the
 * requests from lower-priority queues; among requests of the same
 * queue, requests are served according to B-WF2Q+.
 *
 * The schedule is implemented by the service trees, plus the field
 * @next_in_service, which points to the entity on the active trees
 * that will be served next, if 1) no change in the schedule occurs
 * before the current in-service entity is expired, 2) the in-service
 * queue becomes idle when it expires, and 3) if the entity pointed to
 * by in_service_entity is not a queue, then the in-service child
 * entity of the entity pointed to by in_service_entity becomes idle
 * on expiration. This peculiar definition allows for the following
 * optimization, not yet exploited: while a given entity is still in
 * service, we already know which is the best candidate for next
 * service among the other active entities in the same parent
 * entity. We can then quickly compare the timestamps of the
 * in-service entity with those of such a best candidate.
 *
 * All fields are protected by the lock of the containing bfqd.
 */
struct bfq_sched_data {
	/* entity in service */
	struct bfq_entity *in_service_entity;
	/* head-of-line entity (see comments above) */
	struct bfq_entity *next_in_service;
	/* array of service trees, one per ioprio_class */
	struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
	/* last time CLASS_IDLE was served */
	unsigned long bfq_class_idle_last_service;
};
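
/*
 * A minimal sketch (added for illustration; the helper name is
 * hypothetical, not a BFQ API) of how the per-class service tree is
 * selected within a bfq_sched_data; bfq_entity_service_tree(),
 * declared below, performs this kind of mapping starting from an
 * entity. IOPRIO_CLASS_RT/BE/IDLE are 1, 2 and 3, hence the "- 1".
 */
static inline struct bfq_service_tree *
bfq_class_service_tree_example(struct bfq_sched_data *sd,
			       unsigned short ioprio_class)
{
	return &sd->service_tree[ioprio_class - 1];
}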

/**
 * struct bfq_weight_counter - counter of the number of active queues
 * with a given weight.
 */
struct bfq_weight_counter {
	unsigned int weight; /* weight of the queues this counter refers to */
	unsigned int num_active; /* nr of active queues with this weight */
	/*
	 * Weights tree member (see bfq_data's @queue_weights_tree)
	 */
	struct rb_node weights_node;
};
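
/*
 * Illustrative note (added for clarity, not in the original header):
 * the weights tree (@queue_weights_tree in struct bfq_data below)
 * holds one bfq_weight_counter per distinct weight among the active,
 * non-weight-raised queues. Hence, if that tree never contains more
 * than one node, all such queues share the same weight, which is one
 * of the conditions under which BFQ may deem the scenario symmetric
 * and avoid idling (see bfq_better_to_idle()).
 */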

/**
 * struct bfq_entity - schedulable entity.
 *
 * A bfq_entity is used to represent either a bfq_queue (leaf node in the
 * cgroup hierarchy) or a bfq_group in the upper-level scheduler. Each
 * entity belongs to the sched_data of the parent group in the cgroup
 * hierarchy. Non-leaf entities also have their own sched_data, stored
 * in @my_sched_data.
 *
 * Each entity stores its priority values independently; this would
 * allow different weights on different devices, but this
 * functionality is not yet exported to userspace. Priorities and
 * weights are updated lazily, first storing the new values into the
 * new_* fields, then setting the @prio_changed flag. As soon as
 * there is a transition in the entity state that allows the priority
 * update to take place, the effective and the requested priority
 * values are synchronized.
 *
 * Unless cgroups are used, the weight value is calculated from the
 * ioprio to export the same interface as CFQ. When dealing with
 * "well-behaved" queues (i.e., queues that do not take too long to
 * consume their budget, have truly sequential behavior, and are not
 * disturbed by external factors breaking anticipation), the
 * relative weights at each level of the cgroups hierarchy should be
 * guaranteed. All the fields are protected by the queue lock of the
 * containing bfqd.
 */
struct bfq_entity {
	/* service_tree member */
	struct rb_node rb_node;

	/*
	 * Flag, true if the entity is on a tree (either the active or
	 * the idle one of its service_tree) or is in service.
	 */
	bool on_st_or_in_serv;

	/* B-WF2Q+ start and finish timestamps [sectors/weight] */
	u64 start, finish;

	/* tree the entity is enqueued into; %NULL if not on a tree */
	struct rb_root *tree;

	/*
	 * minimum start time of the (active) subtree rooted at this
	 * entity; used for O(log N) lookups into active trees
	 */
	u64 min_start;

	/* amount of service received during the last service slot */
	int service;

	/* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
	int budget;

	/*
	 * Device weight; if non-zero, it overrides the default weight
	 * of bfq_group_data.
	 */
	int dev_weight;
	/* weight of the queue */
	int weight;
	/* next weight if a change is in progress */
	int new_weight;

	/* original weight, used to implement weight boosting */
	int orig_weight;

	/* parent entity, for hierarchical scheduling */
	struct bfq_entity *parent;

	/*
	 * For non-leaf nodes in the hierarchy, the associated
	 * scheduler queue, %NULL on leaf nodes.
	 */
	struct bfq_sched_data *my_sched_data;
	/* the scheduler queue this entity belongs to */
	struct bfq_sched_data *sched_data;

	/* flag, set to request a weight, ioprio or ioprio_class change */
	int prio_changed;

	/* flag, set if the entity is counted in groups_with_pending_reqs */
	bool in_groups_with_pending_reqs;
};
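
/*
 * A minimal sketch (added for illustration; the helper name is
 * hypothetical, not a BFQ API) of the lazy update protocol described
 * above: a change is only recorded in the new_* fields plus
 * @prio_changed, and becomes effective at the next state transition
 * that allows it (in BFQ this is carried out by
 * __bfq_entity_update_weight_prio(), declared below).
 */
static inline void bfq_entity_set_weight_lazily_example(
					struct bfq_entity *entity,
					int new_weight)
{
	entity->new_weight = new_weight;
	entity->prio_changed = 1; /* picked up at the next safe transition */
}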

struct bfq_group;

/**
 * struct bfq_ttime - per-process thinktime stats.
 */
struct bfq_ttime {
	/* completion time of the last request */
	u64 last_end_request;

	/* total process thinktime */
	u64 ttime_total;
	/* number of thinktime samples */
	unsigned long ttime_samples;
	/* average process thinktime */
	u64 ttime_mean;
};
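
/*
 * Illustrative note (added for clarity, not in the original header):
 * the think time of a process is the interval between the completion
 * of one of its requests (last_end_request) and the arrival of its
 * next request. ttime_mean is kept roughly equal to
 * ttime_total / ttime_samples; in bfq-iosched.c both quantities are
 * updated with a decaying average, so the mean tracks recent behavior
 * rather than the whole history of the process.
 */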

/**
 * struct bfq_queue - leaf schedulable entity.
 *
 * A bfq_queue is a leaf request queue; it can be associated with one
 * or more io_contexts, if it is async or shared between cooperating
 * processes. @cgroup holds a reference to the cgroup, to be sure that it
 * does not disappear while a bfqq still references it (mostly to avoid
 * races between request issuing and task migration followed by cgroup
 * destruction).
 * All the fields are protected by the queue lock of the containing bfqd.
 */
struct bfq_queue {
	/* reference counter */
	int ref;
	/* parent bfq_data */
	struct bfq_data *bfqd;

	/* current ioprio and ioprio class */
	unsigned short ioprio, ioprio_class;
	/* next ioprio and ioprio class if a change is in progress */
	unsigned short new_ioprio, new_ioprio_class;

	/* last total-service-time sample, see bfq_update_inject_limit() */
	u64 last_serv_time_ns;
	/* limit for request injection */
	unsigned int inject_limit;
	/* last time the inject limit has been decreased, in jiffies */
	unsigned long decrease_time_jif;

	/*
	 * Shared bfq_queue if queue is cooperating with one or more
	 * other queues.
	 */
	struct bfq_queue *new_bfqq;
	/* request-position tree member (see bfq_group's @rq_pos_tree) */
	struct rb_node pos_node;
	/* request-position tree root (see bfq_group's @rq_pos_tree) */
	struct rb_root *pos_root;

	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* number of sync and async requests queued */
	int queued[2];
	/* number of requests currently allocated */
	int allocated;
	/* number of pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* entity representing this queue in the scheduler */
	struct bfq_entity entity;

	/* pointer to the weight counter associated with this entity */
	struct bfq_weight_counter *weight_counter;

	/* maximum budget allowed from the feedback mechanism */
	int max_budget;
	/* budget expiration (in jiffies) */
	unsigned long budget_timeout;

	/* number of requests on the dispatch list or inside the driver */
	int dispatched;

	/* status flags */
	unsigned long flags;

	/* node for active/idle bfqq list inside parent bfqd */
	struct list_head bfqq_list;

	/* associated @bfq_ttime struct */
	struct bfq_ttime ttime;

	/* when bfqq started to do I/O within the last observation window */
	u64 io_start_time;
	/* how long bfqq has remained empty during the last observ. window */
	u64 tot_idle_time;

	/* bit vector: a 1 for each seeky request in history */
	u32 seek_history;

	/* node for the device's burst list */
	struct hlist_node burst_list_node;

	/* position of the last request enqueued */
	sector_t last_request_pos;

	/*
	 * Number of consecutive pairs of request completion and
	 * arrival, such that the queue becomes idle after the
	 * completion, but the next request arrives within an idle
	 * time slice; used only if the queue's IO_bound flag has been
	 * cleared.
	 */
	unsigned int requests_within_timer;

	/* pid of the process owning the queue, used for logging purposes */
	pid_t pid;

	/*
	 * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL
	 * if the queue is shared.
	 */
	struct bfq_io_cq *bic;

	/* current maximum weight-raising time for this queue */
	unsigned long wr_cur_max_time;
	/*
	 * Minimum time instant such that, only if a new request is
	 * enqueued after this time instant in an idle @bfq_queue with
	 * no outstanding requests, then the task associated with the
	 * queue is deemed soft real-time (see the comments on the
	 * function bfq_bfqq_softrt_next_start()).
	 */
	unsigned long soft_rt_next_start;
	/*
	 * Start time of the current weight-raising period if
	 * the @bfq_queue is being weight-raised, otherwise
	 * finish time of the last weight-raising period.
	 */
	unsigned long last_wr_start_finish;
	/* factor by which the weight of this queue is multiplied */
	unsigned int wr_coeff;
	/*
	 * Time of the last transition of the @bfq_queue from idle to
	 * backlogged.
	 */
	unsigned long last_idle_bklogged;
	/*
	 * Cumulative service received from the @bfq_queue since the
	 * last transition from idle to backlogged.
	 */
	unsigned long service_from_backlogged;
	/*
	 * Cumulative service received from the @bfq_queue since its
	 * last transition to weight-raised state.
	 */
	unsigned long service_from_wr;

	/*
	 * Value of wr start time when switching to soft rt
	 */
	unsigned long wr_start_at_switch_to_srt;

	unsigned long split_time; /* time of last split */

	unsigned long first_IO_time; /* time of first I/O for this queue */

	/* max service rate measured so far */
	u32 max_service_rate;

	/*
	 * Pointer to the waker queue for this queue, i.e., to the
	 * queue Q such that this queue happens to get new I/O right
	 * after some I/O request of Q is completed. For details, see
	 * the comments on the choice of the queue for injection in
	 * bfq_select_queue().
	 */
	struct bfq_queue *waker_bfqq;
	/* pointer to the curr. tentative waker queue, see bfq_check_waker() */
	struct bfq_queue *tentative_waker_bfqq;
	/* number of times the same tentative waker has been detected */
	unsigned int num_waker_detections;

	/* node for woken_list, see below */
	struct hlist_node woken_list_node;
	/*
	 * Head of the list of the woken queues for this queue, i.e.,
	 * of the list of the queues for which this queue is a waker
	 * queue. This list is used to reset the waker_bfqq pointer in
	 * the woken queues when this queue exits.
	 */
	struct hlist_head woken_list;
};

/**
 * struct bfq_io_cq - per (request_queue, io_context) structure.
 */
struct bfq_io_cq {
	/* associated io_cq structure */
	struct io_cq icq; /* must be the first member */
	/* array of two process queues, the sync and the async */
	struct bfq_queue *bfqq[2];
	/* per (request_queue, blkcg) ioprio */
	int ioprio;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	uint64_t blkcg_serial_nr; /* the current blkcg serial */
#endif
	/*
	 * Snapshot of the has_short_ttime flag before merging; taken
	 * to remember its value while the queue is merged, so as to
	 * be able to restore it in case of split.
	 */
	bool saved_has_short_ttime;
	/*
	 * Same purpose as the previous field, for the I/O bound
	 * classification of a queue.
	 */
	bool saved_IO_bound;

	u64 saved_io_start_time;
	u64 saved_tot_idle_time;

	/*
	 * Same purpose as the previous fields, for the flag recording
	 * whether the queue belongs to a large burst.
	 */
	bool saved_in_large_burst;
	/*
	 * True if the queue belonged to a burst list before its merge
	 * with another cooperating queue.
	 */
	bool was_in_burst_list;

	/*
	 * Save the weight when a merge occurs, to be able
	 * to restore it in case of split. If the weight is not
	 * correctly restored when the queue is recycled,
	 * then the weight of the recycled queue could differ
	 * from the weight of the original queue.
	 */
	unsigned int saved_weight;

	/*
	 * Similar to previous fields: save wr information.
	 */
	unsigned long saved_wr_coeff;
	unsigned long saved_last_wr_start_finish;
	unsigned long saved_service_from_wr;
	unsigned long saved_wr_start_at_switch_to_srt;
	unsigned int saved_wr_cur_max_time;
	struct bfq_ttime saved_ttime;

	/* Save also injection state */
	u64 saved_last_serv_time_ns;
	unsigned int saved_inject_limit;
	unsigned long saved_decrease_time_jif;
};
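
/*
 * A minimal sketch (added for illustration; the helper name is
 * hypothetical, not the actual BFQ function) of the save/restore
 * pattern implemented by the saved_* fields above: before a bfq_queue
 * is merged with a cooperating queue, part of its state is
 * snapshotted into the bfq_io_cq, so that it can be restored if the
 * queue is later split or recycled.
 */
static inline void bfq_bic_save_state_example(struct bfq_io_cq *bic,
					      struct bfq_queue *bfqq)
{
	bic->saved_weight = bfqq->entity.orig_weight;
	bic->saved_wr_coeff = bfqq->wr_coeff;
	bic->saved_ttime = bfqq->ttime;
	bic->saved_last_serv_time_ns = bfqq->last_serv_time_ns;
	bic->saved_inject_limit = bfqq->inject_limit;
	bic->saved_decrease_time_jif = bfqq->decrease_time_jif;
}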

/**
 * struct bfq_data - per-device data structure.
 *
 * All the fields are protected by @lock.
 */
struct bfq_data {
	/* device request queue */
	struct request_queue *queue;
	/* dispatch queue */
	struct list_head dispatch;

	/* root bfq_group for the device */
	struct bfq_group *root_group;

	/*
	 * rbtree of weight counters of @bfq_queues, sorted by
	 * weight. Used to keep track of whether all @bfq_queues have
	 * the same weight. The tree contains one counter for each
	 * distinct weight associated with some active and not
	 * weight-raised @bfq_queue (see the comments to the functions
	 * bfq_weights_tree_[add|remove] for further details).
	 */
	struct rb_root_cached queue_weights_tree;

	/*
	 * Number of groups with at least one descendant process that
	 * has at least one request waiting for completion. Note that
	 * this also accounts for requests already dispatched, but not
	 * yet completed. Therefore this number of groups may differ
	 * from (be larger than) the number of active groups, as a
	 * group is considered active only if its corresponding entity
	 * has descendant queues with at least one request queued. This
	 * number is used to decide whether a scenario is symmetric.
	 * For a detailed explanation see comments on the computation
	 * of the variable asymmetric_scenario in the function
	 * bfq_better_to_idle().
	 *
	 * However, it is hard to compute this number exactly, for
	 * groups with multiple descendant processes. Consider a group
	 * that is inactive, i.e., that has no descendant process with
	 * pending I/O inside BFQ queues. Then suppose that
	 * num_groups_with_pending_reqs is still accounting for this
	 * group, because the group has descendant processes with some
	 * I/O request still in flight. num_groups_with_pending_reqs
	 * should be decremented when the in-flight request of the
	 * last descendant process is finally completed (assuming that
	 * nothing else has changed for the group in the meantime, in
	 * terms of composition of the group and active/inactive state
	 * of child groups and processes). To accomplish this, an
	 * additional pending-request counter must be added to
	 * entities, and must be updated correctly. To avoid this
	 * additional field and operations, we resort to the following
	 * tradeoff between simplicity and accuracy: for an inactive
	 * group that is still counted in num_groups_with_pending_reqs,
	 * we decrement num_groups_with_pending_reqs when the first
	 * descendant process of the group remains with no request
	 * waiting for completion.
	 *
	 * Even this simpler decrement strategy requires a little
	 * carefulness: to avoid multiple decrements, we flag a group,
	 * more precisely an entity representing a group, as still
	 * counted in num_groups_with_pending_reqs when it becomes
	 * inactive. Then, when the first descendant queue of the
	 * entity remains with no request waiting for completion,
	 * num_groups_with_pending_reqs is decremented, and this flag
	 * is reset. After this flag is reset for the entity,
	 * num_groups_with_pending_reqs won't be decremented any
	 * longer in case a new descendant queue of the entity remains
	 * with no request waiting for completion.
	 */
	unsigned int num_groups_with_pending_reqs;

	/*
	 * Per-class (RT, BE, IDLE) number of bfq_queues containing
	 * requests (including the queue in service, even if it is
	 * idling).
	 */
	unsigned int busy_queues[3];
	/* number of weight-raised busy @bfq_queues */
	int wr_busy_queues;
	/* number of queued requests */
	int queued;
	/* number of requests dispatched and waiting for completion */
	int rq_in_driver;

	/* true if the device is non-rotational and performs queueing */
	bool nonrot_with_queueing;

	/*
	 * Maximum number of requests in driver in the last
	 * @hw_tag_samples completed requests.
	 */
	int max_rq_in_driver;
	/* number of samples used to calculate hw_tag */
	int hw_tag_samples;
	/* flag set to one if the driver is showing queueing behavior */
	int hw_tag;

	/* number of budgets assigned */
	int budgets_assigned;

	/*
	 * Timer set when idling (waiting) for the next request from
	 * the queue in service.
	 */
	struct hrtimer idle_slice_timer;

	/* bfq_queue in service */
	struct bfq_queue *in_service_queue;

	/* on-disk position of the last served request */
	sector_t last_position;

	/* position of the last served request for the in-service queue */
	sector_t in_serv_last_pos;

	/* time of last request completion (ns) */
	u64 last_completion;

	/* bfqq owning the last completed rq */
	struct bfq_queue *last_completed_rq_bfqq;

	/* time of last transition from empty to non-empty (ns) */
	u64 last_empty_occupied_ns;

	/*
	 * Flag set to activate the sampling of the total service time
	 * of a just-arrived first I/O request (see
	 * bfq_update_inject_limit()). This will cause the setting of
	 * waited_rq when the request is finally dispatched.
	 */
	bool wait_dispatch;
	/*
	 * If set, then bfq_update_inject_limit() is invoked when
	 * waited_rq is eventually completed.
	 */
	struct request *waited_rq;
	/*
	 * True if some request has been injected during the last service hole.
	 */
	bool rqs_injected;

	/* time of first rq dispatch in current observation interval (ns) */
	u64 first_dispatch;
	/* time of last rq dispatch in current observation interval (ns) */
	u64 last_dispatch;

	/* beginning of the last budget */
	ktime_t last_budget_start;
	/* beginning of the last idle slice */
	ktime_t last_idling_start;
	unsigned long last_idling_start_jiffies;

	/* number of samples in current observation interval */
	int peak_rate_samples;
	/* num of samples of seq dispatches in current observation interval */
	u32 sequential_samples;
	/* total num of sectors transferred in current observation interval */
	u64 tot_sectors_dispatched;
	/* max rq size seen during current observation interval (sectors) */
	u32 last_rq_max_size;
	/* time elapsed from first dispatch in current observ. interval (us) */
	u64 delta_from_first;
	/*
	 * Current estimate of the device peak rate, measured in
	 * [(sectors/usec) / 2^BFQ_RATE_SHIFT]. The left-shift by
	 * BFQ_RATE_SHIFT is performed to increase precision in
	 * fixed-point calculations.
	 */
	u32 peak_rate;
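	/*
	 * Worked example (added for clarity, not in the original
	 * header; BFQ_RATE_SHIFT is defined as 16 in bfq-iosched.c):
	 * a device sustaining ~512 MB/s, i.e., ~10^6 512-byte
	 * sectors/s or ~1 sector/usec, is represented here as
	 * peak_rate ~= 1 << 16.
	 */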

	/* maximum budget allotted to a bfq_queue before rescheduling */
	int bfq_max_budget;

	/* list of all the bfq_queues active on the device */
	struct list_head active_list;
	/* list of all the bfq_queues idle on the device */
	struct list_head idle_list;

	/*
	 * Timeout for async/sync requests; when it fires, requests
	 * are served in fifo order.
	 */
	u64 bfq_fifo_expire[2];
	/* weight of backward seeks wrt forward ones */
	unsigned int bfq_back_penalty;
	/* maximum allowed backward seek */
	unsigned int bfq_back_max;
	/* maximum idling time */
	u32 bfq_slice_idle;

	/* user-configured max budget value (0 for auto-tuning) */
	int bfq_user_max_budget;
	/*
	 * Timeout for bfq_queues to consume their budget; used to
	 * prevent seeky queues from imposing long latencies to
	 * sequential or quasi-sequential ones (this also implies that
	 * seeky queues cannot receive guarantees in the service
	 * domain; after a timeout they are charged for the time they
	 * have been in service, to preserve fairness among them, but
	 * without service-domain guarantees).
	 */
	unsigned int bfq_timeout;

	/*
	 * Force device idling whenever needed to provide accurate
	 * service guarantees, without caring about throughput
	 * issues. CAVEAT: this may even increase latencies, in case
	 * of useless idling for processes that did stop doing I/O.
	 */
	bool strict_guarantees;

	/*
	 * Last time at which a queue entered the current burst of
	 * queues being activated shortly after each other; for more
	 * details about this and the following parameters related to
	 * a burst of activations, see the comments on the function
	 * bfq_handle_burst.
	 */
	unsigned long last_ins_in_burst;
	/*
	 * Reference time interval used to decide whether a queue has
	 * been activated shortly after @last_ins_in_burst.
	 */
	unsigned long bfq_burst_interval;
	/* number of queues in the current burst of queue activations */
	int burst_size;

	/* common parent entity for the queues in the burst */
	struct bfq_entity *burst_parent_entity;
	/*
	 * Maximum burst size above which the current queue-activation
	 * burst is deemed as 'large'.
	 */
	unsigned long bfq_large_burst_thresh;
	/* true if a large queue-activation burst is in progress */
	bool large_burst;
	/*
	 * Head of the burst list (as for the above fields, more
	 * details in the comments on the function bfq_handle_burst).
	 */
	struct hlist_head burst_list;

	/* if set to true, low-latency heuristics are enabled */
	bool low_latency;
	/*
	 * Maximum factor by which the weight of a weight-raised queue
	 * is multiplied.
	 */
	unsigned int bfq_wr_coeff;
	/* maximum duration of a weight-raising period (jiffies) */
	unsigned int bfq_wr_max_time;

	/* Maximum weight-raising duration for soft real-time processes */
	unsigned int bfq_wr_rt_max_time;
	/*
	 * Minimum idle period after which weight-raising may be
	 * reactivated for a queue (in jiffies).
	 */
	unsigned int bfq_wr_min_idle_time;
	/*
	 * Minimum period between request arrivals after which
	 * weight-raising may be reactivated for an already busy async
	 * queue (in jiffies).
	 */
	unsigned long bfq_wr_min_inter_arr_async;

	/* Max service-rate for a soft real-time queue, in sectors/sec */
	unsigned int bfq_wr_max_softrt_rate;
	/*
	 * Cached value of the product ref_rate*ref_wr_duration, used
	 * for computing the maximum duration of weight raising
	 * automatically.
	 */
	u64 rate_dur_prod;

	/* fallback dummy bfqq for extreme OOM conditions */
	struct bfq_queue oom_bfqq;

	spinlock_t lock;

	/*
	 * bic associated with the task issuing current bio for
	 * merging. This and the next field are used as a support to
	 * be able to perform the bic lookup, needed by bio-merge
	 * functions, before the scheduler lock is taken, and thus
	 * avoid taking the request-queue lock while the scheduler
	 * lock is being held.
	 */
	struct bfq_io_cq *bio_bic;
	/* bfqq associated with the task issuing current bio for merging */
	struct bfq_queue *bio_bfqq;

	/*
	 * Depth limits used in bfq_limit_depth (see comments on the
	 * function)
	 */
	unsigned int word_depths[2][2];
};

enum bfqq_state_flags {
	BFQQF_just_created = 0,	/* queue just allocated */
	BFQQF_busy,		/* has requests or is in service */
	BFQQF_wait_request,	/* waiting for a request */
	BFQQF_non_blocking_wait_rq, /*
				     * waiting for a request
				     * without idling the device
				     */
	BFQQF_fifo_expire,	/* FIFO checked in this slice */
	BFQQF_has_short_ttime,	/* queue has a short think time */
	BFQQF_sync,		/* synchronous queue */
	BFQQF_IO_bound,		/*
				 * bfqq has timed-out at least once
				 * having consumed at most 2/10 of
				 * its budget
				 */
	BFQQF_in_large_burst,	/*
				 * bfqq activated in a large burst,
				 * see comments to bfq_handle_burst.
				 */
	BFQQF_softrt_update,	/*
				 * may need softrt-next-start
				 * update
				 */
	BFQQF_coop,		/* bfqq is shared */
	BFQQF_split_coop,	/* shared bfqq will be split */
};

#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq);			\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq);			\
int bfq_bfqq_##name(const struct bfq_queue *bfqq);
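
/*
 * For clarity (expansion note added here, not in the original
 * header): BFQ_BFQQ_FNS(busy), for instance, declares the three
 * prototypes
 *
 *	void bfq_mark_bfqq_busy(struct bfq_queue *bfqq);
 *	void bfq_clear_bfqq_busy(struct bfq_queue *bfqq);
 *	int bfq_bfqq_busy(const struct bfq_queue *bfqq);
 *
 * i.e., a setter, a clearer and a tester for the corresponding
 * BFQQF_busy flag in bfq_queue->flags.
 */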
BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS

/* Expiration reasons. */
enum bfqq_expiration {
	BFQQE_TOO_IDLE = 0,	/*
				 * queue has been idling for
				 * too long
				 */
	BFQQE_BUDGET_TIMEOUT,	/* budget took too long to be used */
	BFQQE_BUDGET_EXHAUSTED,	/* budget consumed */
	BFQQE_NO_MORE_REQUESTS,	/* the queue has no more requests */
	BFQQE_PREEMPTED		/* preemption in progress */
};

struct bfq_stat {
	struct percpu_counter cpu_cnt;
	atomic64_t aux_cnt;
};

struct bfqg_stats {
	/* basic stats */
	struct blkg_rwstat bytes;
	struct blkg_rwstat ios;
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* number of ios merged */
	struct blkg_rwstat merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat queued;
	/* total disk time and nr sectors dispatched by this group */
	struct bfq_stat time;
	/* sum of number of ios queued across all samples */
	struct bfq_stat avg_queue_size_sum;
	/* count of samples taken for average */
	struct bfq_stat avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct bfq_stat dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct bfq_stat group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct bfq_stat idle_time;
	/* total time with empty current active q with other requests queued */
	struct bfq_stat empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	u64 start_group_wait_time;
	u64 start_idle_time;
	u64 start_empty_time;
	uint16_t flags;
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
};

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
 *
 * @pd: @blkcg_policy_data that this structure inherits
 * @weight: weight of the bfq_group
 */
struct bfq_group_data {
	/* must be the first member */
	struct blkcg_policy_data pd;

	unsigned int weight;
};

/**
 * struct bfq_group - per (device, cgroup) data structure.
 * @entity: schedulable entity to insert into the parent group sched_data.
 * @sched_data: own sched_data, to contain child entities (they may be
 *              both bfq_queues and bfq_groups).
 * @bfqd: the bfq_data for the device this group acts upon.
 * @async_bfqq: array of async queues for all the tasks belonging to
 *              the group, one queue per ioprio value per ioprio_class,
 *              except for the idle class that has only one queue.
 * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
 * @my_entity: pointer to @entity, %NULL for the toplevel group; used
 *             to avoid too many special cases during group creation/
 *             migration.
 * @stats: stats for this bfqg.
 * @active_entities: number of active entities belonging to the group;
 *                   unused for the root group. Used to know whether
 *                   there are groups with more than one active
 *                   @bfq_entity (see the comments to the function
 *                   bfq_better_to_idle()).
 * @rq_pos_tree: rbtree sorted by next_request position, used when
 *               determining if two or more queues have interleaving
 *               requests (see bfq_find_close_cooperator()).
 *
 * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
 * there is a set of bfq_groups, each one collecting the lower-level
 * entities belonging to the group that are acting on the same device.
 *
 * Locking works as follows:
 * o @bfqd is protected by the queue lock, RCU is used to access it
 *   from the readers.
 * o All the other fields are protected by the @bfqd queue lock.
 */
struct bfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
	char blkg_path[128];

	/* reference counter (see comments in bfq_bic_update_cgroup) */
	int ref;

	struct bfq_entity entity;
	struct bfq_sched_data sched_data;

	void *bfqd;

	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
	struct bfq_queue *async_idle_bfqq;

	struct bfq_entity *my_entity;

	int active_entities;

	struct rb_root rq_pos_tree;

	struct bfqg_stats stats;
};

#else
struct bfq_group {
	struct bfq_entity entity;
	struct bfq_sched_data sched_data;

	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
	struct bfq_queue *async_idle_bfqq;

	struct rb_root rq_pos_tree;
};
#endif

struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);

/* --------------- main algorithm interface ----------------- */

#define BFQ_SERVICE_TREE_INIT	((struct bfq_service_tree)		\
				{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
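
/*
 * Usage sketch (added for clarity, not in the original header): the
 * initializer fills the fields of struct bfq_service_tree in
 * declaration order (active, idle, first_idle, last_idle, vtime,
 * wsum); each per-class tree of a bfq_sched_data can be set up with:
 *
 *	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
 *		sd->service_tree[i] = BFQ_SERVICE_TREE_INIT;
 */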

extern const int bfq_timeout;

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync);
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  struct rb_root_cached *root);
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
			       struct bfq_queue *bfqq,
			       struct rb_root_cached *root);
void bfq_weights_tree_remove(struct bfq_data *bfqd,
			     struct bfq_queue *bfqq);
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		     bool compensate, enum bfqq_expiration reason);
void bfq_put_queue(struct bfq_queue *bfqq);
void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_schedule_dispatch(struct bfq_data *bfqd);
void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);

/* ------------ end of main algorithm interface -------------- */

/* ---------------- cgroups-support interface ---------------- */

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq);
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op);
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op);
void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg);
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg);
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg);

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
void bfq_end_wr_async(struct bfq_data *bfqd);
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg);
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
void bfqg_and_blkg_put(struct bfq_group *bfqg);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
extern struct cftype bfq_blkcg_legacy_files[];
extern struct cftype bfq_blkg_files[];
extern struct blkcg_policy blkcg_policy_bfq;
#endif

/* ------------- end of cgroups-support interface ------------- */

/* - interface of the internal hierarchical B-WF2Q+ scheduler - */

#ifdef CONFIG_BFQ_GROUP_IOSCHED
/* both next loops stop at one of the child entities of the root group */
#define for_each_entity(entity)	\
	for (; entity ; entity = entity->parent)

/*
 * For each iteration, compute parent in advance, so as to be safe if
 * entity is deallocated during the iteration. Such a deallocation may
 * happen as a consequence of a bfq_put_queue that frees the bfq_queue
 * containing entity.
 */
#define for_each_entity_safe(entity, parent) \
	for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
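
/*
 * Usage sketch (added for clarity, not in the original header):
 * walking up the scheduling hierarchy from a queue's entity towards
 * the root group, e.g., to propagate a state change upwards:
 *
 *	struct bfq_entity *entity = &bfqq->entity;
 *
 *	for_each_entity(entity) {
 *		struct bfq_sched_data *sd = entity->sched_data;
 *
 *		... update sd here ...
 *	}
 */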

#else /* CONFIG_BFQ_GROUP_IOSCHED */
/*
 * Next two macros are fake loops when cgroups support is not
 * enabled. In fact, in such a case, there is only one level to go up
 * (to reach the root group).
 */
#define for_each_entity(entity)	\
	for (; entity ; entity = NULL)

#define for_each_entity_safe(entity, parent) \
	for (parent = NULL; entity ; entity = parent)
#endif /* CONFIG_BFQ_GROUP_IOSCHED */

struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq);
struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd);
struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity);
struct bfq_entity *bfq_entity_of(struct rb_node *node);
unsigned short bfq_ioprio_to_weight(int ioprio);
void bfq_put_idle_entity(struct bfq_service_tree *st,
			 struct bfq_entity *entity);
struct bfq_service_tree *
__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
				struct bfq_entity *entity,
				bool update_class_too);
void bfq_bfqq_served(struct bfq_queue *bfqq, int served);
void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  unsigned long time_ms);
bool __bfq_deactivate_entity(struct bfq_entity *entity,
			     bool ins_into_idle_tree);
bool next_queue_may_preempt(struct bfq_data *bfqd);
struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			 bool ins_into_idle_tree, bool expiration);
void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		      bool expiration);
void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		       bool expiration);
void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);

/* --------------- end of interface of B-WF2Q+ ---------------- */

/* Logging facilities. */
static inline void bfq_pid_to_str(int pid, char *str, int len)
{
	if (pid != -1)
		snprintf(str, len, "%d", pid);
	else
		snprintf(str, len, "SHARED-");
}

#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);

#define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	do {			\
	char pid_str[MAX_PID_STR_LENGTH];				\
	if (likely(!blk_trace_note_message_enabled((bfqd)->queue)))	\
		break;							\
	bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH);	\
	blk_add_cgroup_trace_msg((bfqd)->queue,				\
			bfqg_to_blkg(bfqq_group(bfqq))->blkcg,		\
			"bfq%s%c " fmt, pid_str,			\
			bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args);	\
} while (0)

#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	do {			\
	blk_add_cgroup_trace_msg((bfqd)->queue,				\
		bfqg_to_blkg(bfqg)->blkcg, fmt, ##args);		\
} while (0)

#else /* CONFIG_BFQ_GROUP_IOSCHED */

#define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	do {			\
	char pid_str[MAX_PID_STR_LENGTH];				\
	if (likely(!blk_trace_note_message_enabled((bfqd)->queue)))	\
		break;							\
	bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH);	\
	blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str,	\
			bfq_bfqq_sync((bfqq)) ? 'S' : 'A',		\
			##args);					\
} while (0)
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	do {} while (0)

#endif /* CONFIG_BFQ_GROUP_IOSCHED */

#define bfq_log(bfqd, fmt, args...) \
	blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
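
/*
 * Usage sketch for the logging macros above (added for clarity, not
 * in the original header); messages reach the blktrace stream only
 * when tracing is enabled on the device queue:
 *
 *	bfq_log(bfqd, "dispatched %d requests", count);
 *	bfq_log_bfqq(bfqd, bfqq, "expire, reason %d", reason);
 */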

#endif /* _BFQ_H */