Christoph Hellwig | a497ee3 | 2019-04-30 14:42:40 -0400 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 2 | /* |
| 3 | * Hierarchical Budget Worst-case Fair Weighted Fair Queueing |
| 4 | * (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O |
| 5 | * scheduler schedules generic entities. The latter can represent |
| 6 | * either single bfq queues (associated with processes) or groups of |
| 7 | * bfq queues (associated with cgroups). |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 8 | */ |
| 9 | #include "bfq-iosched.h" |
| 10 | |
| 11 | /** |
| 12 | * bfq_gt - compare two timestamps. |
| 13 | * @a: first ts. |
| 14 | * @b: second ts. |
| 15 | * |
| 16 | * Return @a > @b, dealing with wrapping correctly. |
| 17 | */ |
| 18 | static int bfq_gt(u64 a, u64 b) |
| 19 | { |
| 20 | return (s64)(a - b) > 0; |
| 21 | } |
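/*
 * Worked example (illustrative values only): timestamps are compared
 * through their signed difference, so the comparison stays correct
 * across a u64 wraparound as long as the two values are less than 2^63
 * apart. For instance, with a = 15 and b = U64_MAX - 4 (i.e. b is about
 * to wrap while a has just wrapped), a - b is 20 as an unsigned value,
 * so (s64)(a - b) > 0 and bfq_gt(a, b) correctly reports that a is the
 * later timestamp, even though a < b as plain u64 values.
 */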
| 22 | |
| 23 | static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree) |
| 24 | { |
| 25 | struct rb_node *node = tree->rb_node; |
| 26 | |
| 27 | return rb_entry(node, struct bfq_entity, rb_node); |
| 28 | } |
| 29 | |
| 30 | static unsigned int bfq_class_idx(struct bfq_entity *entity) |
| 31 | { |
| 32 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
| 33 | |
| 34 | return bfqq ? bfqq->ioprio_class - 1 : |
| 35 | BFQ_DEFAULT_GRP_CLASS - 1; |
| 36 | } |
| 37 | |
Paolo Valente | 73d5811 | 2019-01-29 12:06:29 +0100 | [diff] [blame] | 38 | unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd) |
| 39 | { |
| 40 | return bfqd->busy_queues[0] + bfqd->busy_queues[1] + |
| 41 | bfqd->busy_queues[2]; |
| 42 | } |
| 43 | |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 44 | static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, |
| 45 | bool expiration); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 46 | |
| 47 | static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); |
| 48 | |
| 49 | /** |
| 50 | * bfq_update_next_in_service - update sd->next_in_service |
| 51 | * @sd: sched_data for which to perform the update. |
| 52 | * @new_entity: if not NULL, pointer to the entity whose activation, |
Angelo Ruocco | 636b8fe | 2019-04-08 17:35:34 +0200 | [diff] [blame] | 53 | * requeueing or repositioning triggered the invocation of |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 54 | * this function. |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 55 | * @expiration: if true, this function is being invoked after the |
| 56 | * expiration of the in-service entity |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 57 | * |
| 58 | * This function is called to update sd->next_in_service, which, in |
| 59 | * its turn, may change as a consequence of the insertion or |
| 60 | * extraction of an entity into/from one of the active trees of |
| 61 | * sd. These insertions/extractions occur as a consequence of |
| 62 | * activations/deactivations of entities, with some activations being |
| 63 | * 'true' activations, and other activations being requeueings (i.e., |
| 64 | * implementing the second, requeueing phase of the mechanism used to |
| 65 | * reposition an entity in its active tree; see comments on |
| 66 | * __bfq_activate_entity and __bfq_requeue_entity for details). In |
| 67 | * both the last two activation sub-cases, new_entity points to the |
| 68 | * just activated or requeued entity. |
| 69 | * |
| 70 | * Returns true if sd->next_in_service changes in such a way that |
| 71 | * entity->parent may become the next_in_service for its parent |
| 72 | * entity. |
| 73 | */ |
| 74 | static bool bfq_update_next_in_service(struct bfq_sched_data *sd, |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 75 | struct bfq_entity *new_entity, |
| 76 | bool expiration) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 77 | { |
| 78 | struct bfq_entity *next_in_service = sd->next_in_service; |
| 79 | bool parent_sched_may_change = false; |
Paolo Valente | 24d90bb | 2017-08-31 08:46:31 +0200 | [diff] [blame] | 80 | bool change_without_lookup = false; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 81 | |
| 82 | /* |
| 83 | * If this update is triggered by the activation, requeueing |
Angelo Ruocco | 636b8fe | 2019-04-08 17:35:34 +0200 | [diff] [blame] | 84 | * or repositioning of an entity that does not coincide with |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 85 | * sd->next_in_service, then a full lookup in the active tree |
| 86 | * can be avoided. In fact, it is enough to check whether the |
Paolo Valente | a02195c | 2017-08-31 08:46:30 +0200 | [diff] [blame] | 87 | * just-modified entity has the same priority as |
| 88 | * sd->next_in_service, is eligible and has a lower virtual |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 89 | * finish time than sd->next_in_service. If this compound |
| 90 | * condition holds, then the new entity becomes the new |
| 91 | * next_in_service. Otherwise no change is needed. |
| 92 | */ |
| 93 | if (new_entity && new_entity != sd->next_in_service) { |
| 94 | /* |
| 95 | * Flag used to decide whether to replace |
| 96 | * sd->next_in_service with new_entity. Tentatively |
| 97 | * set to true, and left as true if |
| 98 | * sd->next_in_service is NULL. |
| 99 | */ |
Paolo Valente | 24d90bb | 2017-08-31 08:46:31 +0200 | [diff] [blame] | 100 | change_without_lookup = true; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 101 | |
| 102 | /* |
| 103 | * If there is already a next_in_service candidate |
Paolo Valente | a02195c | 2017-08-31 08:46:30 +0200 | [diff] [blame] | 104 | * entity, then compare timestamps to decide whether |
| 105 | * to replace sd->next_in_service with new_entity. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 106 | */ |
| 107 | if (next_in_service) { |
| 108 | unsigned int new_entity_class_idx = |
| 109 | bfq_class_idx(new_entity); |
| 110 | struct bfq_service_tree *st = |
| 111 | sd->service_tree + new_entity_class_idx; |
| 112 | |
Paolo Valente | 24d90bb | 2017-08-31 08:46:31 +0200 | [diff] [blame] | 113 | change_without_lookup = |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 114 | (new_entity_class_idx == |
| 115 | bfq_class_idx(next_in_service) |
| 116 | && |
| 117 | !bfq_gt(new_entity->start, st->vtime) |
| 118 | && |
| 119 | bfq_gt(next_in_service->finish, |
Paolo Valente | a02195c | 2017-08-31 08:46:30 +0200 | [diff] [blame] | 120 | new_entity->finish)); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 121 | } |
| 122 | |
Paolo Valente | 24d90bb | 2017-08-31 08:46:31 +0200 | [diff] [blame] | 123 | if (change_without_lookup) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 124 | next_in_service = new_entity; |
Paolo Valente | 24d90bb | 2017-08-31 08:46:31 +0200 | [diff] [blame] | 125 | } |
| 126 | |
| 127 | if (!change_without_lookup) /* lookup needed */ |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 128 | next_in_service = bfq_lookup_next_entity(sd, expiration); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 129 | |
Paolo Valente | e02a0aa2 | 2018-08-16 18:51:16 +0200 | [diff] [blame] | 130 | if (next_in_service) { |
| 131 | bool new_budget_triggers_change = |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 132 | bfq_update_parent_budget(next_in_service); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 133 | |
Paolo Valente | e02a0aa2 | 2018-08-16 18:51:16 +0200 | [diff] [blame] | 134 | parent_sched_may_change = !sd->next_in_service || |
| 135 | new_budget_triggers_change; |
| 136 | } |
| 137 | |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 138 | sd->next_in_service = next_in_service; |
| 139 | |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 140 | return parent_sched_may_change; |
| 141 | } |
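/*
 * Example of the fast path above (made-up timestamps): assume
 * sd->next_in_service is a BE-class queue with finish == 1000 and the
 * vtime of its service tree is 900. A newly requeued BE-class entity
 * with start == 880 and finish == 950 belongs to the same class, is
 * eligible (start <= vtime) and finishes earlier, so it replaces
 * sd->next_in_service without walking the active tree. If any of the
 * three conditions failed, bfq_lookup_next_entity() would be invoked
 * instead.
 */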
| 142 | |
| 143 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 144 | |
| 145 | struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) |
| 146 | { |
| 147 | struct bfq_entity *group_entity = bfqq->entity.parent; |
| 148 | |
| 149 | if (!group_entity) |
| 150 | group_entity = &bfqq->bfqd->root_group->entity; |
| 151 | |
| 152 | return container_of(group_entity, struct bfq_group, entity); |
| 153 | } |
| 154 | |
| 155 | /* |
| 156 | * Returns true if this budget change may let next_in_service->parent |
| 157 | * become the next_in_service entity for its parent entity. |
| 158 | */ |
| 159 | static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) |
| 160 | { |
| 161 | struct bfq_entity *bfqg_entity; |
| 162 | struct bfq_group *bfqg; |
| 163 | struct bfq_sched_data *group_sd; |
| 164 | bool ret = false; |
| 165 | |
| 166 | group_sd = next_in_service->sched_data; |
| 167 | |
| 168 | bfqg = container_of(group_sd, struct bfq_group, sched_data); |
| 169 | /* |
| 170 | * bfq_group's my_entity field is not NULL only if the group |
| 171 | * is not the root group. We must not touch the root entity |
| 172 | * as it must never become an in-service entity. |
| 173 | */ |
| 174 | bfqg_entity = bfqg->my_entity; |
| 175 | if (bfqg_entity) { |
| 176 | if (bfqg_entity->budget > next_in_service->budget) |
| 177 | ret = true; |
| 178 | bfqg_entity->budget = next_in_service->budget; |
| 179 | } |
| 180 | |
| 181 | return ret; |
| 182 | } |
| 183 | |
| 184 | /* |
| 185 | * This function tells whether entity stops being a candidate for next |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 186 | * service, according to the restrictive definition of the field |
| 187 | * next_in_service. In particular, this function is invoked for an |
| 188 | * entity that is about to be set in service. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 189 | * |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 190 | * If entity is a queue, then the entity is no longer a candidate for |
| 191 | * next service according to that definition, because entity is |
| 192 | * about to become the in-service queue. This function then returns |
| 193 | * true if entity is a queue. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 194 | * |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 195 | * In contrast, entity could still be a candidate for next service if |
| 196 | * it is not a queue, and has more than one active child. In fact, |
| 197 | * even if one of its children is about to be set in service, other |
| 198 | * active children may still be the next to serve, for the parent |
| 199 | * entity, even according to the above definition. As a consequence, a |
| 200 | * non-queue entity is not a candidate for next-service only if it has |
| 201 | * only one active child. Only if this condition holds does this |
| 202 | * function return true for a non-queue entity. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 203 | */ |
| 204 | static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) |
| 205 | { |
| 206 | struct bfq_group *bfqg; |
| 207 | |
| 208 | if (bfq_entity_to_bfqq(entity)) |
| 209 | return true; |
| 210 | |
| 211 | bfqg = container_of(entity, struct bfq_group, entity); |
| 212 | |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 213 | /* |
| 214 | * The field active_entities does not always contain the |
| 215 | * actual number of active children entities: it happens to |
| 216 | * not account for the in-service entity in case the latter is |
| 217 | * removed from its active tree (which may get done after |
| 218 | * invoking the function bfq_no_longer_next_in_service in |
| 219 | * bfq_get_next_queue). Fortunately, here, i.e., while |
| 220 | * bfq_no_longer_next_in_service is not yet completed in |
| 221 | * bfq_get_next_queue, bfq_active_extract has not yet been |
| 222 | * invoked, and thus active_entities still coincides with the |
| 223 | * actual number of active entities. |
| 224 | */ |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 225 | if (bfqg->active_entities == 1) |
| 226 | return true; |
| 227 | |
| 228 | return false; |
| 229 | } |
| 230 | |
| 231 | #else /* CONFIG_BFQ_GROUP_IOSCHED */ |
| 232 | |
| 233 | struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) |
| 234 | { |
| 235 | return bfqq->bfqd->root_group; |
| 236 | } |
| 237 | |
| 238 | static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) |
| 239 | { |
| 240 | return false; |
| 241 | } |
| 242 | |
| 243 | static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) |
| 244 | { |
| 245 | return true; |
| 246 | } |
| 247 | |
| 248 | #endif /* CONFIG_BFQ_GROUP_IOSCHED */ |
| 249 | |
| 250 | /* |
| 251 | * Shift for timestamp calculations. This actually limits the maximum |
| 252 | * service allowed in one timestamp delta (small shift values increase it), |
| 253 | * the maximum total weight that can be used for the queues in the system |
| 254 | * (big shift values increase it), and the period of virtual time |
| 255 | * wraparounds. |
| 256 | */ |
| 257 | #define WFQ_SERVICE_SHIFT 22 |
| 258 | |
| 259 | struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity) |
| 260 | { |
| 261 | struct bfq_queue *bfqq = NULL; |
| 262 | |
| 263 | if (!entity->my_sched_data) |
| 264 | bfqq = container_of(entity, struct bfq_queue, entity); |
| 265 | |
| 266 | return bfqq; |
| 267 | } |
| 268 | |
| 269 | |
| 270 | /** |
| 271 | * bfq_delta - map service into the virtual time domain. |
| 272 | * @service: amount of service. |
| 273 | * @weight: scale factor (weight of an entity or weight sum). |
| 274 | */ |
| 275 | static u64 bfq_delta(unsigned long service, unsigned long weight) |
| 276 | { |
Wen Yang | 554d21e | 2020-01-20 18:04:43 +0800 | [diff] [blame] | 277 | return div64_ul((u64)service << WFQ_SERVICE_SHIFT, weight); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 278 | } |
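/*
 * Numerical sketch (hypothetical values): with WFQ_SERVICE_SHIFT == 22,
 * bfq_delta(service, weight) computes (service * 2^22) / weight. For
 * example, 3000 sectors of service charged to an entity of weight 100
 * stretch its virtual finish time by (3000 << 22) / 100 = 125829120,
 * while the same service charged to an entity of weight 200 stretches
 * it by only half as much. Heavier entities thus accumulate virtual
 * time more slowly and get re-selected sooner.
 */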
| 279 | |
| 280 | /** |
| 281 | * bfq_calc_finish - assign the finish time to an entity. |
| 282 | * @entity: the entity to act upon. |
| 283 | * @service: the service to be charged to the entity. |
| 284 | */ |
| 285 | static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service) |
| 286 | { |
| 287 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
| 288 | |
| 289 | entity->finish = entity->start + |
| 290 | bfq_delta(service, entity->weight); |
| 291 | |
| 292 | if (bfqq) { |
| 293 | bfq_log_bfqq(bfqq->bfqd, bfqq, |
| 294 | "calc_finish: serv %lu, w %d", |
| 295 | service, entity->weight); |
| 296 | bfq_log_bfqq(bfqq->bfqd, bfqq, |
| 297 | "calc_finish: start %llu, finish %llu, delta %llu", |
| 298 | entity->start, entity->finish, |
| 299 | bfq_delta(service, entity->weight)); |
| 300 | } |
| 301 | } |
| 302 | |
| 303 | /** |
| 304 | * bfq_entity_of - get an entity from a node. |
| 305 | * @node: the node field of the entity. |
| 306 | * |
| 307 | * Convert a node pointer to the corresponding entity. This is used only |
| 308 | * to simplify the logic of some functions and not as the generic |
| 309 | * conversion mechanism because, e.g., in the tree walking functions, |
| 310 | * the check for a %NULL value would be redundant. |
| 311 | */ |
| 312 | struct bfq_entity *bfq_entity_of(struct rb_node *node) |
| 313 | { |
| 314 | struct bfq_entity *entity = NULL; |
| 315 | |
| 316 | if (node) |
| 317 | entity = rb_entry(node, struct bfq_entity, rb_node); |
| 318 | |
| 319 | return entity; |
| 320 | } |
| 321 | |
| 322 | /** |
| 323 | * bfq_extract - remove an entity from a tree. |
| 324 | * @root: the tree root. |
| 325 | * @entity: the entity to remove. |
| 326 | */ |
| 327 | static void bfq_extract(struct rb_root *root, struct bfq_entity *entity) |
| 328 | { |
| 329 | entity->tree = NULL; |
| 330 | rb_erase(&entity->rb_node, root); |
| 331 | } |
| 332 | |
| 333 | /** |
| 334 | * bfq_idle_extract - extract an entity from the idle tree. |
| 335 | * @st: the service tree of the owning @entity. |
| 336 | * @entity: the entity being removed. |
| 337 | */ |
| 338 | static void bfq_idle_extract(struct bfq_service_tree *st, |
| 339 | struct bfq_entity *entity) |
| 340 | { |
| 341 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
| 342 | struct rb_node *next; |
| 343 | |
| 344 | if (entity == st->first_idle) { |
| 345 | next = rb_next(&entity->rb_node); |
| 346 | st->first_idle = bfq_entity_of(next); |
| 347 | } |
| 348 | |
| 349 | if (entity == st->last_idle) { |
| 350 | next = rb_prev(&entity->rb_node); |
| 351 | st->last_idle = bfq_entity_of(next); |
| 352 | } |
| 353 | |
| 354 | bfq_extract(&st->idle, entity); |
| 355 | |
| 356 | if (bfqq) |
| 357 | list_del(&bfqq->bfqq_list); |
| 358 | } |
| 359 | |
| 360 | /** |
| 361 | * bfq_insert - generic tree insertion. |
| 362 | * @root: tree root. |
| 363 | * @entity: entity to insert. |
| 364 | * |
| 365 | * This is used for the idle and the active tree, since they are both |
| 366 | * ordered by finish time. |
| 367 | */ |
| 368 | static void bfq_insert(struct rb_root *root, struct bfq_entity *entity) |
| 369 | { |
| 370 | struct bfq_entity *entry; |
| 371 | struct rb_node **node = &root->rb_node; |
| 372 | struct rb_node *parent = NULL; |
| 373 | |
| 374 | while (*node) { |
| 375 | parent = *node; |
| 376 | entry = rb_entry(parent, struct bfq_entity, rb_node); |
| 377 | |
| 378 | if (bfq_gt(entry->finish, entity->finish)) |
| 379 | node = &parent->rb_left; |
| 380 | else |
| 381 | node = &parent->rb_right; |
| 382 | } |
| 383 | |
| 384 | rb_link_node(&entity->rb_node, parent, node); |
| 385 | rb_insert_color(&entity->rb_node, root); |
| 386 | |
| 387 | entity->tree = root; |
| 388 | } |
| 389 | |
| 390 | /** |
| 391 | * bfq_update_min - update the min_start field of an entity. |
| 392 | * @entity: the entity to update. |
| 393 | * @node: one of its children. |
| 394 | * |
| 395 | * This function is called when @entity may store an invalid value for |
| 396 | * min_start due to updates to the active tree. The function assumes |
| 397 | * that the subtree rooted at @node (which may be its left or its right |
| 398 | * child) has a valid min_start value. |
| 399 | */ |
| 400 | static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node) |
| 401 | { |
| 402 | struct bfq_entity *child; |
| 403 | |
| 404 | if (node) { |
| 405 | child = rb_entry(node, struct bfq_entity, rb_node); |
| 406 | if (bfq_gt(entity->min_start, child->min_start)) |
| 407 | entity->min_start = child->min_start; |
| 408 | } |
| 409 | } |
| 410 | |
| 411 | /** |
| 412 | * bfq_update_active_node - recalculate min_start. |
| 413 | * @node: the node to update. |
| 414 | * |
| 415 | * @node may have changed position or one of its children may have moved; |
| 416 | * this function updates its min_start value. The left and right subtrees |
| 417 | * are assumed to hold a correct min_start value. |
| 418 | */ |
| 419 | static void bfq_update_active_node(struct rb_node *node) |
| 420 | { |
| 421 | struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node); |
| 422 | |
| 423 | entity->min_start = entity->start; |
| 424 | bfq_update_min(entity, node->rb_right); |
| 425 | bfq_update_min(entity, node->rb_left); |
| 426 | } |
| 427 | |
| 428 | /** |
| 429 | * bfq_update_active_tree - update min_start for the whole active tree. |
| 430 | * @node: the starting node. |
| 431 | * |
| 432 | * @node must be the deepest modified node after an update. This function |
| 433 | * updates its min_start using the values held by its children, assuming |
| 434 | * that they did not change, and then updates all the nodes that may have |
| 435 | * changed in the path to the root. The only nodes that may have changed |
| 436 | * are the ones in the path or their siblings. |
| 437 | */ |
| 438 | static void bfq_update_active_tree(struct rb_node *node) |
| 439 | { |
| 440 | struct rb_node *parent; |
| 441 | |
| 442 | up: |
| 443 | bfq_update_active_node(node); |
| 444 | |
| 445 | parent = rb_parent(node); |
| 446 | if (!parent) |
| 447 | return; |
| 448 | |
| 449 | if (node == parent->rb_left && parent->rb_right) |
| 450 | bfq_update_active_node(parent->rb_right); |
| 451 | else if (parent->rb_left) |
| 452 | bfq_update_active_node(parent->rb_left); |
| 453 | |
| 454 | node = parent; |
| 455 | goto up; |
| 456 | } |
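/*
 * Illustrative example (made-up numbers): each node of the active tree
 * caches in ->min_start the minimum start time found in its subtree.
 * If a node has start == 30, a left child whose subtree min_start is 10
 * and a right child whose subtree min_start is 25, then
 * bfq_update_active_node() sets its min_start to 10; the two functions
 * above then propagate such updates from the deepest modified node up
 * to the root, so that an eligible entity (one with start <= vtime) can
 * later be located in logarithmic time.
 */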
| 457 | |
| 458 | /** |
| 459 | * bfq_active_insert - insert an entity in the active tree of its |
| 460 | * group/device. |
| 461 | * @st: the service tree of the entity. |
| 462 | * @entity: the entity being inserted. |
| 463 | * |
| 464 | * The active tree is ordered by finish time, but an extra key is kept |
| 465 | * in each node, containing the minimum value for the start times of |
| 466 | * its children (and the node itself), so it's possible to search for |
| 467 | * the eligible node with the lowest finish time in logarithmic time. |
| 468 | */ |
| 469 | static void bfq_active_insert(struct bfq_service_tree *st, |
| 470 | struct bfq_entity *entity) |
| 471 | { |
| 472 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
| 473 | struct rb_node *node = &entity->rb_node; |
| 474 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 475 | struct bfq_sched_data *sd = NULL; |
| 476 | struct bfq_group *bfqg = NULL; |
| 477 | struct bfq_data *bfqd = NULL; |
| 478 | #endif |
| 479 | |
| 480 | bfq_insert(&st->active, entity); |
| 481 | |
| 482 | if (node->rb_left) |
| 483 | node = node->rb_left; |
| 484 | else if (node->rb_right) |
| 485 | node = node->rb_right; |
| 486 | |
| 487 | bfq_update_active_tree(node); |
| 488 | |
| 489 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 490 | sd = entity->sched_data; |
| 491 | bfqg = container_of(sd, struct bfq_group, sched_data); |
| 492 | bfqd = (struct bfq_data *)bfqg->bfqd; |
| 493 | #endif |
| 494 | if (bfqq) |
| 495 | list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list); |
| 496 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 497 | if (bfqg != bfqd->root_group) |
| 498 | bfqg->active_entities++; |
| 499 | #endif |
| 500 | } |
| 501 | |
| 502 | /** |
| 503 | * bfq_ioprio_to_weight - calc a weight from an ioprio. |
| 504 | * @ioprio: the ioprio value to convert. |
| 505 | */ |
| 506 | unsigned short bfq_ioprio_to_weight(int ioprio) |
| 507 | { |
Damien Le Moal | 202bc94 | 2021-08-11 12:37:01 +0900 | [diff] [blame] | 508 | return (IOPRIO_NR_LEVELS - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 509 | } |
| 510 | |
| 511 | /** |
| 512 | * bfq_weight_to_ioprio - calc an ioprio from a weight. |
| 513 | * @weight: the weight value to convert. |
| 514 | * |
| 515 | * To preserve as much as possible the old only-ioprio user interface, |
| 516 | * 0 is used as an escape ioprio value for weights (numerically) equal to or |
Damien Le Moal | 202bc94 | 2021-08-11 12:37:01 +0900 | [diff] [blame] | 517 | * larger than IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 518 | */ |
| 519 | static unsigned short bfq_weight_to_ioprio(int weight) |
| 520 | { |
| 521 | return max_t(int, 0, |
Damien Le Moal | 202bc94 | 2021-08-11 12:37:01 +0900 | [diff] [blame] | 522 | IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF - weight); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 523 | } |
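/*
 * Example of the two conversions above, assuming the values currently
 * used by BFQ (IOPRIO_NR_LEVELS == 8, BFQ_WEIGHT_CONVERSION_COEFF == 10):
 * ioprio 0 (highest priority) maps to weight 80, ioprio 4 maps to
 * weight 40, and ioprio 7 maps to weight 10. In the opposite direction,
 * any weight greater than or equal to 80 yields the escape ioprio
 * value 0.
 */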
| 524 | |
| 525 | static void bfq_get_entity(struct bfq_entity *entity) |
| 526 | { |
| 527 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
| 528 | |
| 529 | if (bfqq) { |
| 530 | bfqq->ref++; |
| 531 | bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", |
| 532 | bfqq, bfqq->ref); |
Dmitry Monakhov | 2de791a | 2020-08-11 06:43:40 +0000 | [diff] [blame] | 533 | } |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 534 | } |
| 535 | |
| 536 | /** |
| 537 | * bfq_find_deepest - find the deepest node that an extraction can modify. |
| 538 | * @node: the node being removed. |
| 539 | * |
| 540 | * Do the first step of an extraction in an rb tree, looking for the |
| 541 | * node that will replace @node, and returning the deepest node that |
| 542 | * the following modifications to the tree can touch. If @node is the |
| 543 | * last node in the tree return %NULL. |
| 544 | */ |
| 545 | static struct rb_node *bfq_find_deepest(struct rb_node *node) |
| 546 | { |
| 547 | struct rb_node *deepest; |
| 548 | |
| 549 | if (!node->rb_right && !node->rb_left) |
| 550 | deepest = rb_parent(node); |
| 551 | else if (!node->rb_right) |
| 552 | deepest = node->rb_left; |
| 553 | else if (!node->rb_left) |
| 554 | deepest = node->rb_right; |
| 555 | else { |
| 556 | deepest = rb_next(node); |
| 557 | if (deepest->rb_right) |
| 558 | deepest = deepest->rb_right; |
| 559 | else if (rb_parent(deepest) != node) |
| 560 | deepest = rb_parent(deepest); |
| 561 | } |
| 562 | |
| 563 | return deepest; |
| 564 | } |
| 565 | |
| 566 | /** |
| 567 | * bfq_active_extract - remove an entity from the active tree. |
| 568 | * @st: the service_tree containing the tree. |
| 569 | * @entity: the entity being removed. |
| 570 | */ |
| 571 | static void bfq_active_extract(struct bfq_service_tree *st, |
| 572 | struct bfq_entity *entity) |
| 573 | { |
| 574 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
| 575 | struct rb_node *node; |
| 576 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 577 | struct bfq_sched_data *sd = NULL; |
| 578 | struct bfq_group *bfqg = NULL; |
| 579 | struct bfq_data *bfqd = NULL; |
| 580 | #endif |
| 581 | |
| 582 | node = bfq_find_deepest(&entity->rb_node); |
| 583 | bfq_extract(&st->active, entity); |
| 584 | |
| 585 | if (node) |
| 586 | bfq_update_active_tree(node); |
| 587 | |
| 588 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 589 | sd = entity->sched_data; |
| 590 | bfqg = container_of(sd, struct bfq_group, sched_data); |
| 591 | bfqd = (struct bfq_data *)bfqg->bfqd; |
| 592 | #endif |
| 593 | if (bfqq) |
| 594 | list_del(&bfqq->bfqq_list); |
| 595 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 596 | if (bfqg != bfqd->root_group) |
| 597 | bfqg->active_entities--; |
| 598 | #endif |
| 599 | } |
| 600 | |
| 601 | /** |
| 602 | * bfq_idle_insert - insert an entity into the idle tree. |
| 603 | * @st: the service tree containing the tree. |
| 604 | * @entity: the entity to insert. |
| 605 | */ |
| 606 | static void bfq_idle_insert(struct bfq_service_tree *st, |
| 607 | struct bfq_entity *entity) |
| 608 | { |
| 609 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
| 610 | struct bfq_entity *first_idle = st->first_idle; |
| 611 | struct bfq_entity *last_idle = st->last_idle; |
| 612 | |
| 613 | if (!first_idle || bfq_gt(first_idle->finish, entity->finish)) |
| 614 | st->first_idle = entity; |
| 615 | if (!last_idle || bfq_gt(entity->finish, last_idle->finish)) |
| 616 | st->last_idle = entity; |
| 617 | |
| 618 | bfq_insert(&st->idle, entity); |
| 619 | |
| 620 | if (bfqq) |
| 621 | list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list); |
| 622 | } |
| 623 | |
| 624 | /** |
| 625 | * bfq_forget_entity - do not consider entity any longer for scheduling |
| 626 | * @st: the service tree. |
| 627 | * @entity: the entity being removed. |
| 628 | * @is_in_service: true if entity is currently the in-service entity. |
| 629 | * |
| 630 | * Forget everything about @entity. In addition, if entity represents |
| 631 | * a queue, and the latter is not in service, then release the service |
| 632 | * reference to the queue (the one taken through bfq_get_entity). In |
| 633 | * fact, in this case, there is really no more service reference to |
| 634 | * the queue, as the latter is also outside any service tree. If, |
| 635 | * instead, the queue is in service, then __bfq_bfqd_reset_in_service |
| 636 | * will take care of putting the reference when the queue finally |
| 637 | * stops being served. |
| 638 | */ |
| 639 | static void bfq_forget_entity(struct bfq_service_tree *st, |
| 640 | struct bfq_entity *entity, |
| 641 | bool is_in_service) |
| 642 | { |
| 643 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
| 644 | |
Paolo Valente | 33a16a9 | 2020-02-03 11:40:57 +0100 | [diff] [blame] | 645 | entity->on_st_or_in_serv = false; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 646 | st->wsum -= entity->weight; |
Dmitry Monakhov | 2de791a | 2020-08-11 06:43:40 +0000 | [diff] [blame] | 647 | if (bfqq && !is_in_service) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 648 | bfq_put_queue(bfqq); |
| 649 | } |
| 650 | |
| 651 | /** |
| 652 | * bfq_put_idle_entity - release the idle tree ref of an entity. |
| 653 | * @st: service tree for the entity. |
| 654 | * @entity: the entity being released. |
| 655 | */ |
| 656 | void bfq_put_idle_entity(struct bfq_service_tree *st, struct bfq_entity *entity) |
| 657 | { |
| 658 | bfq_idle_extract(st, entity); |
| 659 | bfq_forget_entity(st, entity, |
| 660 | entity == entity->sched_data->in_service_entity); |
| 661 | } |
| 662 | |
| 663 | /** |
| 664 | * bfq_forget_idle - update the idle tree if necessary. |
| 665 | * @st: the service tree to act upon. |
| 666 | * |
| 667 | * To preserve the global O(log N) complexity we only remove one entry here; |
| 668 | * as the idle tree will not grow indefinitely this can be done safely. |
| 669 | */ |
| 670 | static void bfq_forget_idle(struct bfq_service_tree *st) |
| 671 | { |
| 672 | struct bfq_entity *first_idle = st->first_idle; |
| 673 | struct bfq_entity *last_idle = st->last_idle; |
| 674 | |
| 675 | if (RB_EMPTY_ROOT(&st->active) && last_idle && |
| 676 | !bfq_gt(last_idle->finish, st->vtime)) { |
| 677 | /* |
| 678 | * Forget the whole idle tree, increasing the vtime past |
| 679 | * the last finish time of idle entities. |
| 680 | */ |
| 681 | st->vtime = last_idle->finish; |
| 682 | } |
| 683 | |
| 684 | if (first_idle && !bfq_gt(first_idle->finish, st->vtime)) |
| 685 | bfq_put_idle_entity(st, first_idle); |
| 686 | } |
| 687 | |
| 688 | struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity) |
| 689 | { |
| 690 | struct bfq_sched_data *sched_data = entity->sched_data; |
| 691 | unsigned int idx = bfq_class_idx(entity); |
| 692 | |
| 693 | return sched_data->service_tree + idx; |
| 694 | } |
| 695 | |
Paolo Valente | 431b17f | 2017-07-03 10:00:10 +0200 | [diff] [blame] | 696 | /* |
| 697 | * Update weight and priority of entity. If update_class_too is true, |
| 698 | * then update the ioprio_class of entity too. |
| 699 | * |
| 700 | * The reason why the update of ioprio_class is controlled through the |
| 701 | * last parameter is as follows. Changing the ioprio class of an |
| 702 | * entity implies changing the destination service trees for that |
| 703 | * entity. If such a change occurred while the entity was already on one |
| 704 | * of the service trees for its previous class, then the state of the |
| 705 | * entity would become more complex: none of the new possible service |
| 706 | * trees for the entity, according to bfq_entity_service_tree(), would |
| 707 | * match any of the possible service trees on which the entity |
| 708 | * is. Complex operations involving these trees, such as entity |
| 709 | * activations and deactivations, should take into account this |
| 710 | * additional complexity. To avoid this issue, this function is |
| 711 | * invoked with update_class_too unset in the points in the code where |
| 712 | * entity may happen to be on some tree. |
| 713 | */ |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 714 | struct bfq_service_tree * |
| 715 | __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, |
Paolo Valente | 431b17f | 2017-07-03 10:00:10 +0200 | [diff] [blame] | 716 | struct bfq_entity *entity, |
| 717 | bool update_class_too) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 718 | { |
| 719 | struct bfq_service_tree *new_st = old_st; |
| 720 | |
| 721 | if (entity->prio_changed) { |
| 722 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
| 723 | unsigned int prev_weight, new_weight; |
| 724 | struct bfq_data *bfqd = NULL; |
Paolo Valente | fb53ac6 | 2019-03-12 09:59:28 +0100 | [diff] [blame] | 725 | struct rb_root_cached *root; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 726 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 727 | struct bfq_sched_data *sd; |
| 728 | struct bfq_group *bfqg; |
| 729 | #endif |
| 730 | |
| 731 | if (bfqq) |
| 732 | bfqd = bfqq->bfqd; |
| 733 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 734 | else { |
| 735 | sd = entity->my_sched_data; |
| 736 | bfqg = container_of(sd, struct bfq_group, sched_data); |
| 737 | bfqd = (struct bfq_data *)bfqg->bfqd; |
| 738 | } |
| 739 | #endif |
| 740 | |
Fam Zheng | e9d3c86 | 2019-08-28 11:54:51 +0800 | [diff] [blame] | 741 | /* Matches the smp_wmb() in bfq_group_set_weight. */ |
| 742 | smp_rmb(); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 743 | old_st->wsum -= entity->weight; |
| 744 | |
| 745 | if (entity->new_weight != entity->orig_weight) { |
| 746 | if (entity->new_weight < BFQ_MIN_WEIGHT || |
| 747 | entity->new_weight > BFQ_MAX_WEIGHT) { |
| 748 | pr_crit("update_weight_prio: new_weight %d\n", |
| 749 | entity->new_weight); |
| 750 | if (entity->new_weight < BFQ_MIN_WEIGHT) |
| 751 | entity->new_weight = BFQ_MIN_WEIGHT; |
| 752 | else |
| 753 | entity->new_weight = BFQ_MAX_WEIGHT; |
| 754 | } |
| 755 | entity->orig_weight = entity->new_weight; |
| 756 | if (bfqq) |
| 757 | bfqq->ioprio = |
| 758 | bfq_weight_to_ioprio(entity->orig_weight); |
| 759 | } |
| 760 | |
Paolo Valente | 431b17f | 2017-07-03 10:00:10 +0200 | [diff] [blame] | 761 | if (bfqq && update_class_too) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 762 | bfqq->ioprio_class = bfqq->new_ioprio_class; |
Paolo Valente | 431b17f | 2017-07-03 10:00:10 +0200 | [diff] [blame] | 763 | |
| 764 | /* |
| 765 | * Reset prio_changed only if the ioprio_class change |
| 766 | * is not pending any longer. |
| 767 | */ |
| 768 | if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class) |
| 769 | entity->prio_changed = 0; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 770 | |
| 771 | /* |
| 772 | * NOTE: here we may be changing the weight too early, |
| 773 | * this will cause unfairness. The correct approach |
| 774 | * would have required additional complexity to defer |
| 775 | * weight changes to the proper time instants (i.e., |
| 776 | * when entity->finish <= old_st->vtime). |
| 777 | */ |
| 778 | new_st = bfq_entity_service_tree(entity); |
| 779 | |
| 780 | prev_weight = entity->weight; |
| 781 | new_weight = entity->orig_weight * |
| 782 | (bfqq ? bfqq->wr_coeff : 1); |
| 783 | /* |
Federico Motta | 2d29c9f | 2018-10-12 11:55:57 +0200 | [diff] [blame] | 784 | * If the weight of the entity changes, and the entity is a |
| 785 | * queue, remove the entity from its old weight counter (if |
| 786 | * there is a counter associated with the entity). |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 787 | */ |
Federico Motta | 98fa7a3 | 2018-10-24 19:13:25 +0200 | [diff] [blame] | 788 | if (prev_weight != new_weight && bfqq) { |
| 789 | root = &bfqd->queue_weights_tree; |
| 790 | __bfq_weights_tree_remove(bfqd, bfqq, root); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 791 | } |
| 792 | entity->weight = new_weight; |
| 793 | /* |
Federico Motta | 2d29c9f | 2018-10-12 11:55:57 +0200 | [diff] [blame] | 794 | * Add the entity, if it is not a weight-raised queue, |
| 795 | * to the counter associated with its new weight. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 796 | */ |
Federico Motta | 98fa7a3 | 2018-10-24 19:13:25 +0200 | [diff] [blame] | 797 | if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) { |
| 798 | /* If we get here, root has been initialized. */ |
| 799 | bfq_weights_tree_add(bfqd, bfqq, root); |
Federico Motta | 2d29c9f | 2018-10-12 11:55:57 +0200 | [diff] [blame] | 800 | } |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 801 | |
| 802 | new_st->wsum += entity->weight; |
| 803 | |
| 804 | if (new_st != old_st) |
| 805 | entity->start = new_st->vtime; |
| 806 | } |
| 807 | |
| 808 | return new_st; |
| 809 | } |
| 810 | |
| 811 | /** |
| 812 | * bfq_bfqq_served - update the scheduler status after selection for |
| 813 | * service. |
| 814 | * @bfqq: the queue being served. |
| 815 | * @served: bytes to transfer. |
| 816 | * |
| 817 | * NOTE: this can be optimized, as the timestamps of upper level entities |
| 818 | * are synchronized every time a new bfqq is selected for service. For now, |
| 819 | * we keep it to better check consistency. |
| 820 | */ |
| 821 | void bfq_bfqq_served(struct bfq_queue *bfqq, int served) |
| 822 | { |
| 823 | struct bfq_entity *entity = &bfqq->entity; |
| 824 | struct bfq_service_tree *st; |
| 825 | |
Paolo Valente | 7b8fa3b | 2017-12-20 12:38:33 +0100 | [diff] [blame] | 826 | if (!bfqq->service_from_backlogged) |
| 827 | bfqq->first_IO_time = jiffies; |
| 828 | |
Paolo Valente | 8a8747d | 2018-01-13 12:05:18 +0100 | [diff] [blame] | 829 | if (bfqq->wr_coeff > 1) |
| 830 | bfqq->service_from_wr += served; |
| 831 | |
Paolo Valente | 7b8fa3b | 2017-12-20 12:38:33 +0100 | [diff] [blame] | 832 | bfqq->service_from_backlogged += served; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 833 | for_each_entity(entity) { |
| 834 | st = bfq_entity_service_tree(entity); |
| 835 | |
| 836 | entity->service += served; |
| 837 | |
| 838 | st->vtime += bfq_delta(served, st->wsum); |
| 839 | bfq_forget_idle(st); |
| 840 | } |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 841 | bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served); |
| 842 | } |
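/*
 * Illustrative example (made-up numbers): the virtual time of each
 * service tree advances by the service received divided by the total
 * weight of the tree. If st->wsum == 300 and the in-service queue
 * receives 3000 sectors of service, st->vtime grows by
 * bfq_delta(3000, 300) = (3000 << 22) / 300 = 10 << 22, i.e. by the
 * amount a weight-1 entity would be charged for 10 sectors.
 */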
| 843 | |
| 844 | /** |
| 845 | * bfq_bfqq_charge_time - charge an amount of service equivalent to the length |
| 846 | * of the time interval during which bfqq has been in |
| 847 | * service. |
| 848 | * @bfqd: the device |
| 849 | * @bfqq: the queue that needs a service update. |
| 850 | * @time_ms: the amount of time during which the queue has received service |
| 851 | * |
| 852 | * If a queue does not consume its budget fast enough, then providing |
| 853 | * the queue with service fairness may impair throughput, more or less |
| 854 | * severely. For this reason, queues that consume their budget slowly |
| 855 | * are provided with time fairness instead of service fairness. This |
| 856 | * goal is achieved through the BFQ scheduling engine, even if such an |
| 857 | * engine works in the service domain, not in the time domain. The trick |
| 858 | * is charging these queues with an inflated amount of service, equal |
| 859 | * to the amount of service that they would have received during their |
| 860 | * service slot if they had been fast, i.e., if their requests had |
| 861 | * been dispatched at a rate equal to the estimated peak rate. |
| 862 | * |
| 863 | * It is worth noting that time fairness can cause important |
| 864 | * distortions in terms of bandwidth distribution, on devices with |
| 865 | * internal queueing. The reason is that I/O requests dispatched |
| 866 | * during the service slot of a queue may be served after that service |
| 867 | * slot is finished, and may have a total processing time loosely |
| 868 | * correlated with the duration of the service slot. This is |
| 869 | * especially true for short service slots. |
| 870 | */ |
| 871 | void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
| 872 | unsigned long time_ms) |
| 873 | { |
| 874 | struct bfq_entity *entity = &bfqq->entity; |
Paolo Valente | f812164 | 2018-08-16 18:51:18 +0200 | [diff] [blame] | 875 | unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout); |
| 876 | unsigned long bounded_time_ms = min(time_ms, timeout_ms); |
| 877 | int serv_to_charge_for_time = |
| 878 | (bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms; |
| 879 | int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 880 | |
| 881 | /* Increase budget to avoid inconsistencies */ |
| 882 | if (tot_serv_to_charge > entity->budget) |
| 883 | entity->budget = tot_serv_to_charge; |
| 884 | |
| 885 | bfq_bfqq_served(bfqq, |
| 886 | max_t(int, 0, tot_serv_to_charge - entity->service)); |
| 887 | } |
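/*
 * Numerical sketch (hypothetical values, not taken from this file):
 * suppose bfqd->bfq_max_budget is 16384 sectors and bfq_timeout
 * corresponds to 125 ms. A slow queue that stays in service for 50 ms
 * is then charged (16384 * 50) / 125 = 6553 sectors, unless it has
 * already consumed more than that, in which case the larger
 * entity->service value is charged instead.
 */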
| 888 | |
| 889 | static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, |
| 890 | struct bfq_service_tree *st, |
| 891 | bool backshifted) |
| 892 | { |
| 893 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
| 894 | |
Paolo Valente | 431b17f | 2017-07-03 10:00:10 +0200 | [diff] [blame] | 895 | /* |
| 896 | * When this function is invoked, entity is not in any service |
| 897 | * tree, so it is safe to invoke the next function with the last |
| 898 | * parameter set (see the comments on the function). |
| 899 | */ |
| 900 | st = __bfq_entity_update_weight_prio(st, entity, true); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 901 | bfq_calc_finish(entity, entity->budget); |
| 902 | |
| 903 | /* |
| 904 | * If some queues enjoy backshifting for a while, then their |
| 905 | * (virtual) finish timestamps may happen to become lower and |
| 906 | * lower than the system virtual time. In particular, if |
| 907 | * these queues often happen to be idle for short time |
| 908 | * periods, and during such time periods other queues with |
| 909 | * higher timestamps happen to be busy, then the backshifted |
| 910 | * timestamps of the former queues can become much lower than |
| 911 | * the system virtual time. In fact, to serve the queues with |
| 912 | * higher timestamps while the ones with lower timestamps are |
| 913 | * idle, the system virtual time may be pushed-up to much |
| 914 | * higher values than the finish timestamps of the idle |
| 915 | * queues. As a consequence, the finish timestamps of all new |
| 916 | * or newly activated queues may end up being much larger than |
| 917 | * those of lucky queues with backshifted timestamps. The |
| 918 | * latter queues may then monopolize the device for a lot of |
| 919 | * time. This would simply break service guarantees. |
| 920 | * |
| 921 | * To reduce this problem, push up a little bit the |
| 922 | * backshifted timestamps of the queue associated with this |
| 923 | * entity (only a queue can happen to have the backshifted |
| 924 | * flag set): just enough to let the finish timestamp of the |
| 925 | * queue be equal to the current value of the system virtual |
| 926 | * time. This may introduce a little unfairness among queues |
| 927 | * with backshifted timestamps, but it does not break |
| 928 | * worst-case fairness guarantees. |
| 929 | * |
| 930 | * As a special case, if bfqq is weight-raised, push up |
| 931 | * timestamps much less, to keep very low the probability that |
| 932 | * this push up causes the backshifted finish timestamps of |
| 933 | * weight-raised queues to become higher than the backshifted |
| 934 | * finish timestamps of non weight-raised queues. |
| 935 | */ |
| 936 | if (backshifted && bfq_gt(st->vtime, entity->finish)) { |
| 937 | unsigned long delta = st->vtime - entity->finish; |
| 938 | |
| 939 | if (bfqq) |
| 940 | delta /= bfqq->wr_coeff; |
| 941 | |
| 942 | entity->start += delta; |
| 943 | entity->finish += delta; |
| 944 | } |
| 945 | |
| 946 | bfq_active_insert(st, entity); |
| 947 | } |
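/*
 * Illustrative numbers for the push-up above (hypothetical values): if
 * st->vtime == 500 and a backshifted queue has finish == 200, then
 * delta == 300 and both timestamps are moved forward by 300, so the
 * queue's finish time becomes equal to the current virtual time. If the
 * queue is weight-raised with wr_coeff == 30, only 300 / 30 == 10 is
 * added, keeping its backshifted advantage almost intact.
 */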
| 948 | |
| 949 | /** |
| 950 | * __bfq_activate_entity - handle activation of entity. |
| 951 | * @entity: the entity being activated. |
| 952 | * @non_blocking_wait_rq: true if entity was waiting for a request |
| 953 | * |
| 954 | * Called for a 'true' activation, i.e., if entity is not active and |
| 955 | * one of its children receives a new request. |
| 956 | * |
| 957 | * Basically, this function updates the timestamps of entity and |
Paolo Valente | 0471559 | 2018-06-25 21:55:34 +0200 | [diff] [blame] | 958 | * inserts entity into its active tree, after possibly extracting it |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 959 | * from its idle tree. |
| 960 | */ |
| 961 | static void __bfq_activate_entity(struct bfq_entity *entity, |
| 962 | bool non_blocking_wait_rq) |
| 963 | { |
| 964 | struct bfq_service_tree *st = bfq_entity_service_tree(entity); |
| 965 | bool backshifted = false; |
| 966 | unsigned long long min_vstart; |
| 967 | |
| 968 | /* See comments on bfq_bfqq_update_budg_for_activation */ |
| 969 | if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) { |
| 970 | backshifted = true; |
| 971 | min_vstart = entity->finish; |
| 972 | } else |
| 973 | min_vstart = st->vtime; |
| 974 | |
| 975 | if (entity->tree == &st->idle) { |
| 976 | /* |
| 977 | * Must be on the idle tree, bfq_idle_extract() will |
| 978 | * check for that. |
| 979 | */ |
| 980 | bfq_idle_extract(st, entity); |
| 981 | entity->start = bfq_gt(min_vstart, entity->finish) ? |
| 982 | min_vstart : entity->finish; |
| 983 | } else { |
| 984 | /* |
| 985 | * The finish time of the entity may be invalid, and |
| 986 | * it is in the past for sure, otherwise the queue |
| 987 | * would have been on the idle tree. |
| 988 | */ |
| 989 | entity->start = min_vstart; |
| 990 | st->wsum += entity->weight; |
| 991 | /* |
| 992 | * entity is about to be inserted into a service tree, |
| 993 | * and then set in service: get a reference to make |
| 994 | * sure entity does not disappear until it is no |
| 995 | * longer in service or scheduled for service. |
| 996 | */ |
| 997 | bfq_get_entity(entity); |
| 998 | |
Paolo Valente | 33a16a9 | 2020-02-03 11:40:57 +0100 | [diff] [blame] | 999 | entity->on_st_or_in_serv = true; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1000 | } |
| 1001 | |
Konstantin Khlebnikov | 42b1bd3 | 2019-03-29 17:01:18 +0300 | [diff] [blame] | 1002 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
Paolo Valente | 0471559 | 2018-06-25 21:55:34 +0200 | [diff] [blame] | 1003 | if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */ |
| 1004 | struct bfq_group *bfqg = |
| 1005 | container_of(entity, struct bfq_group, entity); |
Federico Motta | 2d29c9f | 2018-10-12 11:55:57 +0200 | [diff] [blame] | 1006 | struct bfq_data *bfqd = bfqg->bfqd; |
Paolo Valente | 0471559 | 2018-06-25 21:55:34 +0200 | [diff] [blame] | 1007 | |
Paolo Valente | ba7aeae | 2018-12-06 19:18:18 +0100 | [diff] [blame] | 1008 | if (!entity->in_groups_with_pending_reqs) { |
| 1009 | entity->in_groups_with_pending_reqs = true; |
| 1010 | bfqd->num_groups_with_pending_reqs++; |
| 1011 | } |
Paolo Valente | 0471559 | 2018-06-25 21:55:34 +0200 | [diff] [blame] | 1012 | } |
| 1013 | #endif |
| 1014 | |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1015 | bfq_update_fin_time_enqueue(entity, st, backshifted); |
| 1016 | } |
| 1017 | |
| 1018 | /** |
| 1019 | * __bfq_requeue_entity - handle requeueing or repositioning of an entity. |
| 1020 | * @entity: the entity being requeued or repositioned. |
| 1021 | * |
| 1022 | * Requeueing is needed if this entity stops being served, which |
| 1023 | * happens if a leaf descendant entity has expired. On the other hand, |
| 1024 | * repositioning is needed if the next_in_service entity for the child |
| 1025 | * entity has changed. See the comments inside the function for |
| 1026 | * details. |
| 1027 | * |
| 1028 | * Basically, this function: 1) removes entity from its active tree if |
| 1029 | * present there, 2) updates the timestamps of entity and 3) inserts |
| 1030 | * entity back into its active tree (in the new, right position for |
| 1031 | * the new values of the timestamps). |
| 1032 | */ |
| 1033 | static void __bfq_requeue_entity(struct bfq_entity *entity) |
| 1034 | { |
| 1035 | struct bfq_sched_data *sd = entity->sched_data; |
| 1036 | struct bfq_service_tree *st = bfq_entity_service_tree(entity); |
| 1037 | |
| 1038 | if (entity == sd->in_service_entity) { |
| 1039 | /* |
| 1040 | * We are requeueing the current in-service entity, |
| 1041 | * which may have to be done for one of the following |
| 1042 | * reasons: |
| 1043 | * - entity represents the in-service queue, and the |
| 1044 | * in-service queue is being requeued after an |
| 1045 | * expiration; |
| 1046 | * - entity represents a group, and its budget has |
| 1047 | * changed because one of its child entities has |
| 1048 | * just been either activated or requeued for some |
| 1049 | * reason; the timestamps of the entity need then to |
| 1050 | * be updated, and the entity needs to be enqueued |
| 1051 | * or repositioned accordingly. |
| 1052 | * |
| 1053 | * In particular, before requeueing, the start time of |
| 1054 | * the entity must be moved forward to account for the |
| 1055 | * service that the entity has received while in |
| 1056 | * service. This is done by the next instructions. The |
| 1057 | * finish time will then be updated according to this |
| 1058 | * new value of the start time, and to the budget of |
| 1059 | * the entity. |
| 1060 | */ |
| 1061 | bfq_calc_finish(entity, entity->service); |
| 1062 | entity->start = entity->finish; |
| 1063 | /* |
| 1064 | * In addition, if the entity had more than one child |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 1065 | * when set in service, then it was not extracted from |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1066 | * the active tree. This implies that the position of |
| 1067 | * the entity in the active tree may need to be |
| 1068 | * changed now, because we have just updated the start |
| 1069 | * time of the entity, and we will update its finish |
| 1070 | * time in a moment (the requeueing is then, more |
| 1071 | * precisely, a repositioning in this case). To |
| 1072 | * implement this repositioning, we: 1) dequeue the |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 1073 | * entity here, 2) update the finish time and requeue |
| 1074 | * the entity according to the new timestamps below. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1075 | */ |
| 1076 | if (entity->tree) |
| 1077 | bfq_active_extract(st, entity); |
| 1078 | } else { /* The entity is already active, and not in service */ |
| 1079 | /* |
| 1080 | * In this case, this function gets called only if the |
| 1081 | * next_in_service entity below this entity has |
| 1082 | * changed, and this change has caused the budget of |
| 1083 | * this entity to change, which, finally implies that |
| 1084 | * the finish time of this entity must be |
| 1085 | * updated. Such an update may cause the scheduling, |
| 1086 | * i.e., the position in the active tree, of this |
| 1087 | * entity to change. We handle this change by: 1) |
| 1088 | * dequeueing the entity here, 2) updating the finish |
| 1089 | * time and requeueing the entity according to the new |
| 1090 | * timestamps below. This is the same approach as the |
| 1091 | * non-extracted-entity sub-case above. |
| 1092 | */ |
| 1093 | bfq_active_extract(st, entity); |
| 1094 | } |
| 1095 | |
| 1096 | bfq_update_fin_time_enqueue(entity, st, false); |
| 1097 | } |
| 1098 | |
| 1099 | static void __bfq_activate_requeue_entity(struct bfq_entity *entity, |
| 1100 | struct bfq_sched_data *sd, |
| 1101 | bool non_blocking_wait_rq) |
| 1102 | { |
| 1103 | struct bfq_service_tree *st = bfq_entity_service_tree(entity); |
| 1104 | |
| 1105 | if (sd->in_service_entity == entity || entity->tree == &st->active) |
| 1106 | /* |
| 1107 | * in service or already queued on the active tree, |
| 1108 | * requeue or reposition |
| 1109 | */ |
| 1110 | __bfq_requeue_entity(entity); |
| 1111 | else |
| 1112 | /* |
| 1113 | * Not in service and not queued on its active tree: |
| 1114 | * the entity is idle and this is a true activation. |
| 1115 | */ |
| 1116 | __bfq_activate_entity(entity, non_blocking_wait_rq); |
| 1117 | } |
| 1118 | |
| 1119 | |
| 1120 | /** |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 1121 | * bfq_activate_requeue_entity - activate or requeue an entity representing a |
| 1122 | * bfq_queue, and activate, requeue or reposition |
| 1123 | * all ancestors for which such an update becomes |
| 1124 | * necessary. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1125 | * @entity: the entity to activate. |
| 1126 | * @non_blocking_wait_rq: true if this entity was waiting for a request |
| 1127 | * @requeue: true if this is a requeue, which implies that bfqq is |
| 1128 | * being expired; thus ALL its ancestors stop being served and must |
| 1129 | * therefore be requeued |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1130 | * @expiration: true if this function is being invoked in the expiration path |
| 1131 | * of the in-service queue |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1132 | */ |
| 1133 | static void bfq_activate_requeue_entity(struct bfq_entity *entity, |
| 1134 | bool non_blocking_wait_rq, |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1135 | bool requeue, bool expiration) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1136 | { |
| 1137 | struct bfq_sched_data *sd; |
| 1138 | |
| 1139 | for_each_entity(entity) { |
| 1140 | sd = entity->sched_data; |
| 1141 | __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq); |
| 1142 | |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1143 | if (!bfq_update_next_in_service(sd, entity, expiration) && |
| 1144 | !requeue) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1145 | break; |
| 1146 | } |
| 1147 | } |
| 1148 | |
| 1149 | /** |
Paolo Valente | 5bf8590 | 2018-12-06 19:18:19 +0100 | [diff] [blame] | 1150 | * __bfq_deactivate_entity - update sched_data and service trees for |
| 1151 | * entity, so as to represent entity as inactive |
| 1152 | * @entity: the entity being deactivated. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1153 | * @ins_into_idle_tree: if false, the entity will not be put into the |
| 1154 | * idle tree. |
| 1155 | * |
Paolo Valente | 5bf8590 | 2018-12-06 19:18:19 +0100 | [diff] [blame] | 1156 | * If necessary and allowed, puts entity into the idle tree. NOTE: |
| 1157 | * entity may be on no tree if in service. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1158 | */ |
| 1159 | bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) |
| 1160 | { |
| 1161 | struct bfq_sched_data *sd = entity->sched_data; |
Paolo Valente | a66c38a | 2017-05-09 11:37:27 +0200 | [diff] [blame] | 1162 | struct bfq_service_tree *st; |
| 1163 | bool is_in_service; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1164 | |
Paolo Valente | 33a16a9 | 2020-02-03 11:40:57 +0100 | [diff] [blame] | 1165 | if (!entity->on_st_or_in_serv) /* |
| 1166 | * entity never activated, or |
| 1167 | * already inactive |
| 1168 | */ |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1169 | return false; |
| 1170 | |
Paolo Valente | a66c38a | 2017-05-09 11:37:27 +0200 | [diff] [blame] | 1171 | /* |
| 1172 | * If we get here, then entity is active, which implies that |
| 1173 | * bfq_group_set_parent has already been invoked for the group |
| 1174 | * represented by entity. Therefore, the field |
| 1175 | * entity->sched_data has been set, and we can safely use it. |
| 1176 | */ |
| 1177 | st = bfq_entity_service_tree(entity); |
| 1178 | is_in_service = entity == sd->in_service_entity; |
| 1179 | |
Paolo Valente | cbeb869 | 2018-09-14 16:23:07 +0200 | [diff] [blame] | 1180 | bfq_calc_finish(entity, entity->service); |
| 1181 | |
| 1182 | if (is_in_service) |
Paolo Valente | 6ab1d8d | 2017-07-28 21:41:18 +0200 | [diff] [blame] | 1183 | sd->in_service_entity = NULL; |
Paolo Valente | cbeb869 | 2018-09-14 16:23:07 +0200 | [diff] [blame] | 1184 | else |
| 1185 | /* |
| 1186 | * Non in-service entity: nobody will take care of |
| 1187 | * resetting its service counter on expiration. Do it |
| 1188 | * now. |
| 1189 | */ |
| 1190 | entity->service = 0; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1191 | |
| 1192 | if (entity->tree == &st->active) |
| 1193 | bfq_active_extract(st, entity); |
| 1194 | else if (!is_in_service && entity->tree == &st->idle) |
| 1195 | bfq_idle_extract(st, entity); |
| 1196 | |
| 1197 | if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime)) |
| 1198 | bfq_forget_entity(st, entity, is_in_service); |
| 1199 | else |
| 1200 | bfq_idle_insert(st, entity); |
| 1201 | |
| 1202 | return true; |
| 1203 | } |
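/*
 * The last step above decides the fate of a just-deactivated entity: it
 * is forgotten outright unless the caller allows idle-tree insertion and
 * the entity's finish time is still ahead of the tree's virtual time, in
 * which case it is parked in the idle tree so that its old timestamps can
 * still be honoured. A small self-contained sketch of that decision, with
 * illustrative ex_* names in place of the kernel's:
 */
#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "a > b" on 64-bit virtual-time stamps. */
static bool ex_ts_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

enum ex_deactivation { EX_FORGET, EX_PARK_IN_IDLE_TREE };

static enum ex_deactivation
ex_deactivation_target(bool ins_into_idle_tree, uint64_t finish, uint64_t vtime)
{
	/* Not allowed to idle-park, or timestamps already expired: forget. */
	if (!ins_into_idle_tree || !ex_ts_gt(finish, vtime))
		return EX_FORGET;

	return EX_PARK_IN_IDLE_TREE;
}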
| 1204 | |
| 1205 | /** |
| 1206 | * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. |
| 1207 | * @entity: the entity to deactivate. |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 1208 | * @ins_into_idle_tree: true if the entity can be put into the idle tree |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1209 | * @expiration: true if this function is being invoked in the expiration path |
| 1210 | * of the in-service queue |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1211 | */ |
| 1212 | static void bfq_deactivate_entity(struct bfq_entity *entity, |
| 1213 | bool ins_into_idle_tree, |
| 1214 | bool expiration) |
| 1215 | { |
| 1216 | struct bfq_sched_data *sd; |
| 1217 | struct bfq_entity *parent = NULL; |
| 1218 | |
| 1219 | for_each_entity_safe(entity, parent) { |
| 1220 | sd = entity->sched_data; |
| 1221 | |
| 1222 | if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) { |
| 1223 | /* |
| 1224 | * entity is not in any tree any more, so |
| 1225 | * this deactivation is a no-op, and there is |
| 1226 | * nothing to change for upper-level entities |
| 1227 | * (in case of expiration, this can never |
| 1228 | * happen). |
| 1229 | */ |
| 1230 | return; |
| 1231 | } |
| 1232 | |
| 1233 | if (sd->next_in_service == entity) |
| 1234 | /* |
| 1235 | * entity was the next_in_service entity; |
| 1236 | * since entity has just been |
| 1237 | * deactivated, a new one must be found. |
| 1238 | */ |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1239 | bfq_update_next_in_service(sd, NULL, expiration); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1240 | |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 1241 | if (sd->next_in_service || sd->in_service_entity) { |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1242 | /* |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 1243 | * The parent entity is still active, because |
| 1244 | * either next_in_service or in_service_entity |
| 1245 | * is not NULL. So, no further upwards |
| 1246 | * deactivation needs to be performed. Yet, |
| 1247 | * next_in_service has changed, so the schedule |
| 1248 | * still needs to be updated upwards. |
| 1249 | * |
| 1250 | * NOTE If in_service_entity is not NULL, then |
| 1251 | * next_in_service may happen to be NULL, |
| 1252 | * although the parent entity is evidently |
| 1253 | * active. This happens if 1) the entity |
| 1254 | * pointed by in_service_entity is the only |
| 1255 | * active entity in the parent entity, and 2) |
| 1256 | * according to the definition of |
| 1257 | * next_in_service, the in_service_entity |
| 1258 | * cannot be considered as |
| 1259 | * next_in_service. See the comments on the |
| 1260 | * definition of next_in_service for details. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1261 | */ |
| 1262 | break; |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 1263 | } |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1264 | |
| 1265 | /* |
| 1266 | * If we get here, then the parent is no longer |
| 1267 | * backlogged and we need to propagate the |
| 1268 | * deactivation upwards. Thus let the loop go on. |
| 1269 | */ |
| 1270 | |
| 1271 | /* |
| 1272 | * Also let parent be queued into the idle tree on |
| 1273 | * deactivation, to preserve service guarantees, and |
| 1274 | * assuming that who invoked this function does not |
| 1275 | * need parent entities too to be removed completely. |
| 1276 | */ |
| 1277 | ins_into_idle_tree = true; |
| 1278 | } |
| 1279 | |
| 1280 | /* |
| 1281 | * If the deactivation loop is fully executed, then there are |
| 1282 | * no more entities to touch and the next loop is not executed at |
| 1283 | * all. Otherwise, requeue remaining entities if they are |
| 1284 | * about to stop receiving service, or reposition them if this |
| 1285 | * is not the case. |
| 1286 | */ |
| 1287 | entity = parent; |
| 1288 | for_each_entity(entity) { |
| 1289 | /* |
| 1290 | * Invoke __bfq_requeue_entity on entity, even if |
| 1291 | * already active, to requeue/reposition it in the |
| 1292 | * active tree (because sd->next_in_service has |
| 1293 | * changed) |
| 1294 | */ |
| 1295 | __bfq_requeue_entity(entity); |
| 1296 | |
| 1297 | sd = entity->sched_data; |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1298 | if (!bfq_update_next_in_service(sd, entity, expiration) && |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1299 | !expiration) |
| 1300 | /* |
| 1301 | * next_in_service unchanged or not causing |
| 1302 | * any change in entity->parent->sd, and no |
| 1303 | * requeueing needed for expiration: stop |
| 1304 | * here. |
| 1305 | */ |
| 1306 | break; |
| 1307 | } |
| 1308 | } |
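/*
 * bfq_deactivate_entity() is thus a two-phase walk: the first loop climbs
 * the hierarchy deactivating entities for as long as every level becomes
 * completely idle, and the second loop, starting from the first ancestor
 * that is still backlogged, only requeues/repositions the surviving
 * entities so that their timestamps and next-in-service choices stay
 * consistent. A compressed, self-contained sketch of that control flow,
 * with illustrative ex_deact_* names and a boolean flag standing in for
 * the "level still has work" test:
 */
#include <stdbool.h>
#include <stddef.h>

struct ex_deact_entity {
	struct ex_deact_entity *parent;
	bool level_still_backlogged;	/* other work remains at this level */
};

static void ex_deact_one(struct ex_deact_entity *e) { (void)e; }
static void ex_requeue_one(struct ex_deact_entity *e) { (void)e; }

static void ex_deactivate_path(struct ex_deact_entity *entity)
{
	struct ex_deact_entity *parent = NULL;

	/* Phase 1: deactivate upwards while each level becomes empty. */
	for (; entity; entity = parent) {
		parent = entity->parent;
		ex_deact_one(entity);
		if (entity->level_still_backlogged)
			break;	/* ancestors stay active: stop deactivating */
	}

	/* Phase 2: requeue/reposition the surviving ancestors, if any. */
	for (entity = parent; entity; entity = entity->parent)
		ex_requeue_one(entity);
}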
| 1309 | |
| 1310 | /** |
| 1311 | * bfq_calc_vtime_jump - compute the value to which the vtime should jump, |
| 1312 | * if needed, to have at least one entity eligible. |
| 1313 | * @st: the service tree to act upon. |
| 1314 | * |
| 1315 | * Assumes that st is not empty. |
| 1316 | */ |
| 1317 | static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st) |
| 1318 | { |
| 1319 | struct bfq_entity *root_entity = bfq_root_active_entity(&st->active); |
| 1320 | |
| 1321 | if (bfq_gt(root_entity->min_start, st->vtime)) |
| 1322 | return root_entity->min_start; |
| 1323 | |
| 1324 | return st->vtime; |
| 1325 | } |
| 1326 | |
| 1327 | static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value) |
| 1328 | { |
| 1329 | if (new_value > st->vtime) { |
| 1330 | st->vtime = new_value; |
| 1331 | bfq_forget_idle(st); |
| 1332 | } |
| 1333 | } |
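/*
 * Together, the two helpers above implement the "virtual time jump": if
 * even the earliest start time in the active tree (the min_start cached
 * in its root) lies ahead of st->vtime, the virtual time is pushed
 * forward exactly to that value, and it never moves backwards. For
 * instance, with vtime = 100 and root min_start = 140 the new vtime is
 * 140, while with root min_start = 90 it stays at 100. A self-contained
 * sketch, with illustrative ex_* names:
 */
#include <stdint.h>

/* Wrap-safe "a > b" on 64-bit virtual-time stamps. */
static int ex_vt_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

/* Smallest virtual time at which at least one entity becomes eligible. */
static uint64_t ex_calc_vtime_jump(uint64_t vtime, uint64_t root_min_start)
{
	return ex_vt_gt(root_min_start, vtime) ? root_min_start : vtime;
}

/* The update only ever moves the virtual time forward. */
static void ex_update_vtime(uint64_t *vtime, uint64_t new_value)
{
	if (ex_vt_gt(new_value, *vtime))
		*vtime = new_value;
}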
| 1334 | |
| 1335 | /** |
| 1336 | * bfq_first_active_entity - find the eligible entity with |
| 1337 | * the smallest finish time |
| 1338 | * @st: the service tree to select from. |
| 1339 | * @vtime: the system virtual time to use as a reference for eligibility |
| 1340 | * |
| 1341 | * This function searches for the first schedulable entity, starting from |
| 1342 | * the root of the tree and descending into the left subtree whenever that |
Hou Tao | 38c9140 | 2017-07-12 15:25:01 +0800 | [diff] [blame] | 1343 | * subtree contains at least one eligible (start <= vtime) entity. The path on |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1344 | * the right is followed only if a) the left subtree contains no eligible |
| 1345 | * entities and b) no eligible entity has been found yet. |
| 1346 | */ |
| 1347 | static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st, |
| 1348 | u64 vtime) |
| 1349 | { |
| 1350 | struct bfq_entity *entry, *first = NULL; |
| 1351 | struct rb_node *node = st->active.rb_node; |
| 1352 | |
| 1353 | while (node) { |
| 1354 | entry = rb_entry(node, struct bfq_entity, rb_node); |
| 1355 | left: |
| 1356 | if (!bfq_gt(entry->start, vtime)) |
| 1357 | first = entry; |
| 1358 | |
| 1359 | if (node->rb_left) { |
| 1360 | entry = rb_entry(node->rb_left, |
| 1361 | struct bfq_entity, rb_node); |
| 1362 | if (!bfq_gt(entry->min_start, vtime)) { |
| 1363 | node = node->rb_left; |
| 1364 | goto left; |
| 1365 | } |
| 1366 | } |
| 1367 | if (first) |
| 1368 | break; |
| 1369 | node = node->rb_right; |
| 1370 | } |
| 1371 | |
| 1372 | return first; |
| 1373 | } |
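/*
 * The search above relies on the classic B-WF2Q+ augmentation: every node
 * of the active tree caches min_start, the smallest start time in its
 * subtree, so whole subtrees without eligible entities can be skipped
 * while candidates are still visited in finish-time order (left children
 * hold smaller finish times). The self-contained sketch below mirrors the
 * same descent on a plain binary tree; ex_tree_node and its fields are
 * illustrative stand-ins for the kernel's rbtree node and entity.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ex_tree_node {
	uint64_t start;			/* eligibility timestamp of this node  */
	uint64_t min_start;		/* minimum start over the whole subtree */
	struct ex_tree_node *left;	/* subtree with smaller finish times   */
	struct ex_tree_node *right;	/* subtree with larger finish times    */
};

static bool ex_tree_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

static struct ex_tree_node *
ex_first_eligible(struct ex_tree_node *node, uint64_t vtime)
{
	struct ex_tree_node *first = NULL;

	while (node) {
left:
		/* An eligible node: remember it as the current candidate. */
		if (!ex_tree_gt(node->start, vtime))
			first = node;

		/* The left subtree holds an eligible node: keep descending. */
		if (node->left && !ex_tree_gt(node->left->min_start, vtime)) {
			node = node->left;
			goto left;
		}
		if (first)
			break;
		node = node->right;
	}

	return first;
}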
| 1374 | |
| 1375 | /** |
| 1376 | * __bfq_lookup_next_entity - return the first eligible entity in @st. |
| 1377 | * @st: the service tree. |
| 1378 | * |
| 1379 | * If there is no in-service entity for the sched_data st belongs to, |
| 1380 | * then return the entity that will be set in service if: |
| 1381 | * 1) the parent entity this st belongs to is set in service; |
| 1382 | * 2) no entity belonging to such parent entity undergoes a state change |
| 1383 | * that would influence the timestamps of the entity (e.g., becomes idle, |
| 1384 | * becomes backlogged, changes its budget, ...). |
| 1385 | * |
| 1386 | * In this first case, update the virtual time in @st too (see the |
| 1387 | * comments on this update inside the function). |
| 1388 | * |
Angelo Ruocco | 636b8fe | 2019-04-08 17:35:34 +0200 | [diff] [blame] | 1389 | * In contrast, if there is an in-service entity, then return the |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1390 | * entity that would be set in service if not only the above |
| 1391 | * conditions, but also the next one held true: the currently |
| 1392 | * in-service entity, on expiration, |
| 1393 | * 1) gets a finish time equal to the current one, or |
| 1394 | * 2) is not eligible any more, or |
| 1395 | * 3) is idle. |
| 1396 | */ |
| 1397 | static struct bfq_entity * |
| 1398 | __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service) |
| 1399 | { |
| 1400 | struct bfq_entity *entity; |
| 1401 | u64 new_vtime; |
| 1402 | |
| 1403 | if (RB_EMPTY_ROOT(&st->active)) |
| 1404 | return NULL; |
| 1405 | |
| 1406 | /* |
| 1407 | * Get the value of the system virtual time for which at |
| 1408 | * least one entity is eligible. |
| 1409 | */ |
| 1410 | new_vtime = bfq_calc_vtime_jump(st); |
| 1411 | |
| 1412 | /* |
| 1413 | * If there is no in-service entity for the sched_data this |
| 1414 | * active tree belongs to, then push the system virtual time |
| 1415 | * up to the value that guarantees that at least one entity is |
| 1416 | * eligible. If, instead, there is an in-service entity, then |
| 1417 | * do not make any such update, because there is already an |
| 1418 | * eligible entity, namely the in-service one (even if the |
| 1419 | * entity is not on st, because it was extracted when set in |
| 1420 | * service). |
| 1421 | */ |
| 1422 | if (!in_service) |
| 1423 | bfq_update_vtime(st, new_vtime); |
| 1424 | |
| 1425 | entity = bfq_first_active_entity(st, new_vtime); |
| 1426 | |
| 1427 | return entity; |
| 1428 | } |
| 1429 | |
| 1430 | /** |
| 1431 | * bfq_lookup_next_entity - return the first eligible entity in @sd. |
| 1432 | * @sd: the sched_data. |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1433 | * @expiration: true if we are on the expiration path of the in-service queue |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1434 | * |
| 1435 | * This function is invoked when there has been a change in the trees |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1436 | * for sd, and we need to determine the new next entity to serve |
| 1437 | * after this change. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1438 | */ |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1439 | static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, |
| 1440 | bool expiration) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1441 | { |
| 1442 | struct bfq_service_tree *st = sd->service_tree; |
| 1443 | struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1); |
| 1444 | struct bfq_entity *entity = NULL; |
| 1445 | int class_idx = 0; |
| 1446 | |
| 1447 | /* |
| 1448 | * Choose from the idle class, if needed to guarantee a minimum |
| 1449 | * bandwidth to this class (and if there is some active entity |
| 1450 | * in the idle class). This should also mitigate |
| 1451 | * priority-inversion problems in case a low-priority task is |
| 1452 | * holding file system resources. |
| 1453 | */ |
| 1454 | if (time_is_before_jiffies(sd->bfq_class_idle_last_service + |
| 1455 | BFQ_CL_IDLE_TIMEOUT)) { |
| 1456 | if (!RB_EMPTY_ROOT(&idle_class_st->active)) |
| 1457 | class_idx = BFQ_IOPRIO_CLASSES - 1; |
| 1458 | /* About to be served if backlogged, or not yet backlogged */ |
| 1459 | sd->bfq_class_idle_last_service = jiffies; |
| 1460 | } |
| 1461 | |
| 1462 | /* |
| 1463 | * Find the next entity to serve for the highest-priority |
| 1464 | * class, unless the idle class needs to be served. |
| 1465 | */ |
| 1466 | for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) { |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1467 | /* |
| 1468 | * If expiration is true, then bfq_lookup_next_entity |
| 1469 | * is being invoked as a part of the expiration path |
| 1470 | * of the in-service queue. In this case, even if |
| 1471 | * sd->in_service_entity is not NULL, |
Angelo Ruocco | 636b8fe | 2019-04-08 17:35:34 +0200 | [diff] [blame] | 1472 | * sd->in_service_entity at this point is actually not |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1473 | * in service any more, and, if needed, has already |
| 1474 | * been properly queued or requeued into the right |
| 1475 | * tree. The reason why sd->in_service_entity is still |
| 1476 | * not NULL here, even if expiration is true, is that |
Angelo Ruocco | 636b8fe | 2019-04-08 17:35:34 +0200 | [diff] [blame] | 1477 | * sd->in_service_entity is reset as a last step in the |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1478 | * expiration path. So, if expiration is true, tell |
| 1479 | * __bfq_lookup_next_entity that there is no |
| 1480 | * sd->in_service_entity. |
| 1481 | */ |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1482 | entity = __bfq_lookup_next_entity(st + class_idx, |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1483 | sd->in_service_entity && |
| 1484 | !expiration); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1485 | |
| 1486 | if (entity) |
| 1487 | break; |
| 1488 | } |
| 1489 | |
| 1490 | if (!entity) |
| 1491 | return NULL; |
| 1492 | |
| 1493 | return entity; |
| 1494 | } |
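/*
 * The class scan above also encodes the anti-starvation guarantee for
 * CLASS_IDLE: only when at least BFQ_CL_IDLE_TIMEOUT jiffies have elapsed
 * since the idle class was last considered (and that class has an active
 * entity) does the scan start from the idle class; otherwise it starts
 * from the highest-priority class and stops at the first class with an
 * eligible entity. A self-contained sketch of that gating decision, using
 * a plain tick counter in place of jiffies and illustrative ex_* names:
 */
#include <stdbool.h>

#define EX_NR_CLASSES		3	/* RT, BE, IDLE, as in BFQ          */
#define EX_IDLE_CLASS		(EX_NR_CLASSES - 1)
#define EX_CL_IDLE_TIMEOUT	100	/* stand-in for BFQ_CL_IDLE_TIMEOUT */

/* Class index the lookup should start scanning from. */
static int ex_first_class_to_scan(unsigned long now,
				  unsigned long *idle_last_service,
				  bool idle_class_has_active_entities)
{
	if (now - *idle_last_service > EX_CL_IDLE_TIMEOUT) {
		/* About to be served if backlogged; restart its timer anyway. */
		*idle_last_service = now;
		if (idle_class_has_active_entities)
			return EX_IDLE_CLASS;
	}

	return 0;	/* start from the highest-priority class */
}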
| 1495 | |
| 1496 | bool next_queue_may_preempt(struct bfq_data *bfqd) |
| 1497 | { |
| 1498 | struct bfq_sched_data *sd = &bfqd->root_group->sched_data; |
| 1499 | |
| 1500 | return sd->next_in_service != sd->in_service_entity; |
| 1501 | } |
| 1502 | |
| 1503 | /* |
| 1504 | * Get next queue for service. |
| 1505 | */ |
| 1506 | struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) |
| 1507 | { |
| 1508 | struct bfq_entity *entity = NULL; |
| 1509 | struct bfq_sched_data *sd; |
| 1510 | struct bfq_queue *bfqq; |
| 1511 | |
Paolo Valente | 73d5811 | 2019-01-29 12:06:29 +0100 | [diff] [blame] | 1512 | if (bfq_tot_busy_queues(bfqd) == 0) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1513 | return NULL; |
| 1514 | |
| 1515 | /* |
| 1516 | * Traverse the path from the root to the leaf entity to |
| 1517 | * serve. Set in service all the entities visited along the |
| 1518 | * way. |
| 1519 | */ |
| 1520 | sd = &bfqd->root_group->sched_data; |
| 1521 | for (; sd ; sd = entity->my_sched_data) { |
| 1522 | /* |
| 1523 | * WARNING. We are about to set the in-service entity |
| 1524 | * to sd->next_in_service, i.e., to the (cached) value |
| 1525 | * returned by bfq_lookup_next_entity(sd) the last |
| 1526 | * time it was invoked, i.e., the last time when the |
| 1527 | * service order in sd changed as a consequence of the |
| 1528 | * activation or deactivation of an entity. In this |
| 1529 | * respect, if we execute bfq_lookup_next_entity(sd) |
| 1530 | * in this very moment, it may, although with low |
| 1531 | * probability, yield a different entity than that |
| 1532 | * pointed to by sd->next_in_service. This rare event |
| 1533 | * happens in case there was no CLASS_IDLE entity to |
| 1534 | * serve for sd when bfq_lookup_next_entity(sd) was |
| 1535 | * invoked for the last time, while there is now one |
| 1536 | * such entity. |
| 1537 | * |
| 1538 | * If the above event happens, then the scheduling of |
| 1539 | * such entity in CLASS_IDLE is postponed until the |
| 1540 | * service of the sd->next_in_service entity |
| 1541 | * finishes. In fact, when the latter is expired, |
| 1542 | * bfq_lookup_next_entity(sd) gets called again, |
| 1543 | * exactly to update sd->next_in_service. |
| 1544 | */ |
| 1545 | |
| 1546 | /* Make next_in_service entity become in_service_entity */ |
| 1547 | entity = sd->next_in_service; |
| 1548 | sd->in_service_entity = entity; |
| 1549 | |
| 1550 | /* |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1551 | * If entity is no longer a candidate for next |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 1552 | * service, then it must be extracted from its active |
| 1553 | * tree, so as to make sure that it won't be |
| 1554 | * considered when computing next_in_service. See the |
| 1555 | * comments on the function |
| 1556 | * bfq_no_longer_next_in_service() for details. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1557 | */ |
| 1558 | if (bfq_no_longer_next_in_service(entity)) |
| 1559 | bfq_active_extract(bfq_entity_service_tree(entity), |
| 1560 | entity); |
| 1561 | |
| 1562 | /* |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 1563 | * Even if entity is not to be extracted according to |
| 1564 | * the above check, a descendant entity may get |
| 1565 | * extracted in one of the next iterations of this |
| 1566 | * loop. Such an event could cause a change in |
| 1567 | * next_in_service for the level of the descendant |
| 1568 | * entity, and thus possibly back to this level. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1569 | * |
Paolo Valente | 46d556e | 2017-07-29 12:42:56 +0200 | [diff] [blame] | 1570 | * However, we cannot perform the resulting needed |
| 1571 | * update of next_in_service for this level before the |
| 1572 | * end of the whole loop, because, to know which is |
| 1573 | * the correct next-to-serve candidate entity for each |
| 1574 | * level, we need first to find the leaf entity to set |
| 1575 | * in service. In fact, only after we know which is |
| 1576 | * the next-to-serve leaf entity, we can discover |
| 1577 | * whether the parent entity of the leaf entity |
| 1578 | * becomes the next-to-serve, and so on. |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1579 | */ |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1580 | } |
| 1581 | |
| 1582 | bfqq = bfq_entity_to_bfqq(entity); |
| 1583 | |
| 1584 | /* |
| 1585 | * We can finally update all next-to-serve entities along the |
| 1586 | * path from the leaf entity just set in service to the root. |
| 1587 | */ |
| 1588 | for_each_entity(entity) { |
| 1589 | struct bfq_sched_data *sd = entity->sched_data; |
| 1590 | |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1591 | if (!bfq_update_next_in_service(sd, NULL, false)) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1592 | break; |
| 1593 | } |
| 1594 | |
| 1595 | return bfqq; |
| 1596 | } |
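/*
 * Picking the next queue is therefore a two-pass walk over the path from
 * the root group down to a leaf queue: the downward pass turns each
 * level's cached next_in_service into its in_service_entity, and the
 * upward pass refreshes next_in_service along the chosen path, stopping
 * as soon as a level is unaffected. A compressed, self-contained sketch
 * of that walk; the ex_sel_* types and the always-changed refresh stub
 * are illustrative simplifications, not the kernel's logic.
 */
#include <stdbool.h>
#include <stddef.h>

struct ex_sel_level;

struct ex_sel_entity {
	struct ex_sel_entity *parent;		/* towards the root            */
	struct ex_sel_level *level;		/* level this entity queues in */
	struct ex_sel_level *my_level;		/* non-NULL only for groups    */
};

struct ex_sel_level {
	struct ex_sel_entity *in_service_entity;
	struct ex_sel_entity *next_in_service;
};

/* Stand-in for bfq_update_next_in_service(); pretend the choice changed. */
static bool ex_sel_refresh_next(struct ex_sel_level *lvl)
{
	(void)lvl;
	return true;
}

/*
 * Returns the leaf entity just set in service. Assumes every level has a
 * non-NULL next_in_service, which the caller guarantees by checking that
 * some queue is backlogged before starting the walk.
 */
static struct ex_sel_entity *
ex_set_next_path_in_service(struct ex_sel_level *root)
{
	struct ex_sel_level *lvl;
	struct ex_sel_entity *entity = NULL;
	struct ex_sel_entity *e;

	/* Downward pass: commit the cached choice at every level. */
	for (lvl = root; lvl; lvl = entity->my_level) {
		entity = lvl->next_in_service;
		lvl->in_service_entity = entity;
	}

	/* Upward pass: refresh next_in_service along the chosen path. */
	for (e = entity; e; e = e->parent)
		if (!ex_sel_refresh_next(e->level))
			break;

	return entity;
}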
| 1597 | |
Paolo Valente | eed47d1 | 2019-04-10 10:38:33 +0200 | [diff] [blame] | 1598 | /* returns true if the in-service queue gets freed */ |
| 1599 | bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1600 | { |
| 1601 | struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue; |
| 1602 | struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity; |
| 1603 | struct bfq_entity *entity = in_serv_entity; |
| 1604 | |
| 1605 | bfq_clear_bfqq_wait_request(in_serv_bfqq); |
| 1606 | hrtimer_try_to_cancel(&bfqd->idle_slice_timer); |
| 1607 | bfqd->in_service_queue = NULL; |
| 1608 | |
| 1609 | /* |
| 1610 | * When this function is called, all in-service entities have |
| 1611 | * been properly deactivated or requeued, so we can safely |
| 1612 | * execute the final step: reset in_service_entity along the |
| 1613 | * path from entity to the root. |
| 1614 | */ |
| 1615 | for_each_entity(entity) |
| 1616 | entity->sched_data->in_service_entity = NULL; |
| 1617 | |
| 1618 | /* |
| 1619 | * in_serv_entity is no longer in service, so, if it is not on |
| 1620 | * any service tree either, then release the service reference to |
| 1621 | * the queue it represents (taken with bfq_get_entity). |
| 1622 | */ |
Paolo Valente | 33a16a9 | 2020-02-03 11:40:57 +0100 | [diff] [blame] | 1623 | if (!in_serv_entity->on_st_or_in_serv) { |
Paolo Valente | eed47d1 | 2019-04-10 10:38:33 +0200 | [diff] [blame] | 1624 | /* |
| 1625 | * If no process is referencing in_serv_bfqq any |
| 1626 | * longer, then the service reference may be the only |
| 1627 | * reference to the queue. If this is the case, then |
| 1628 | * bfqq gets freed here. |
| 1629 | */ |
| 1630 | int ref = in_serv_bfqq->ref; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1631 | bfq_put_queue(in_serv_bfqq); |
Paolo Valente | eed47d1 | 2019-04-10 10:38:33 +0200 | [diff] [blame] | 1632 | if (ref == 1) |
| 1633 | return true; |
| 1634 | } |
| 1635 | |
| 1636 | return false; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1637 | } |
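/*
 * The return value above is computed with a common refcounting idiom:
 * sample the reference count right before dropping the service reference;
 * if that count was 1, the put was the last one, the queue has just been
 * freed and the caller must not touch it again. A minimal self-contained
 * sketch of the same pattern, with a plain int counter and illustrative
 * ex_* names (the real count is manipulated under the scheduler lock):
 */
#include <stdbool.h>
#include <stdlib.h>

struct ex_queue {
	int ref;	/* reference count, protected by the caller's lock */
};

static void ex_put_queue(struct ex_queue *q)
{
	if (--q->ref == 0)
		free(q);
}

/* Drop one reference; report whether @q was freed by this very put. */
static bool ex_drop_service_ref(struct ex_queue *q)
{
	int ref = q->ref;	/* sample before the put */

	ex_put_queue(q);
	return ref == 1;
}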
| 1638 | |
| 1639 | void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
| 1640 | bool ins_into_idle_tree, bool expiration) |
| 1641 | { |
| 1642 | struct bfq_entity *entity = &bfqq->entity; |
| 1643 | |
| 1644 | bfq_deactivate_entity(entity, ins_into_idle_tree, expiration); |
| 1645 | } |
| 1646 | |
| 1647 | void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
| 1648 | { |
| 1649 | struct bfq_entity *entity = &bfqq->entity; |
| 1650 | |
| 1651 | bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq), |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1652 | false, false); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1653 | bfq_clear_bfqq_non_blocking_wait_rq(bfqq); |
| 1654 | } |
| 1655 | |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1656 | void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
| 1657 | bool expiration) |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1658 | { |
| 1659 | struct bfq_entity *entity = &bfqq->entity; |
| 1660 | |
| 1661 | bfq_activate_requeue_entity(entity, false, |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 1662 | bfqq == bfqd->in_service_queue, expiration); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1663 | } |
| 1664 | |
| 1665 | /* |
| 1666 | * Called when the bfqq no longer has requests pending; removes it from |
| 1667 | * the service tree. As a special case, it can be invoked during an |
| 1668 | * expiration. |
| 1669 | */ |
| 1670 | void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
| 1671 | bool expiration) |
| 1672 | { |
| 1673 | bfq_log_bfqq(bfqd, bfqq, "del from busy"); |
| 1674 | |
| 1675 | bfq_clear_bfqq_busy(bfqq); |
| 1676 | |
Paolo Valente | 73d5811 | 2019-01-29 12:06:29 +0100 | [diff] [blame] | 1677 | bfqd->busy_queues[bfqq->ioprio_class - 1]--; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1678 | |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1679 | if (bfqq->wr_coeff > 1) |
| 1680 | bfqd->wr_busy_queues--; |
| 1681 | |
| 1682 | bfqg_stats_update_dequeue(bfqq_group(bfqq)); |
| 1683 | |
| 1684 | bfq_deactivate_bfqq(bfqd, bfqq, true, expiration); |
Paolo Valente | 9dee8b3 | 2019-01-29 12:06:34 +0100 | [diff] [blame] | 1685 | |
| 1686 | if (!bfqq->dispatched) |
| 1687 | bfq_weights_tree_remove(bfqd, bfqq); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1688 | } |
| 1689 | |
| 1690 | /* |
| 1691 | * Called when an inactive queue receives a new request. |
| 1692 | */ |
| 1693 | void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
| 1694 | { |
| 1695 | bfq_log_bfqq(bfqd, bfqq, "add to busy"); |
| 1696 | |
| 1697 | bfq_activate_bfqq(bfqd, bfqq); |
| 1698 | |
| 1699 | bfq_mark_bfqq_busy(bfqq); |
Paolo Valente | 73d5811 | 2019-01-29 12:06:29 +0100 | [diff] [blame] | 1700 | bfqd->busy_queues[bfqq->ioprio_class - 1]++; |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1701 | |
| 1702 | if (!bfqq->dispatched) |
| 1703 | if (bfqq->wr_coeff == 1) |
Federico Motta | 2d29c9f | 2018-10-12 11:55:57 +0200 | [diff] [blame] | 1704 | bfq_weights_tree_add(bfqd, bfqq, |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1705 | &bfqd->queue_weights_tree); |
| 1706 | |
| 1707 | if (bfqq->wr_coeff > 1) |
| 1708 | bfqd->wr_busy_queues++; |
Paolo Valente | 2ec5a5c | 2021-03-04 18:46:22 +0100 | [diff] [blame] | 1709 | |
| 1710 | /* Move bfqq to the head of the woken list of its waker */ |
| 1711 | if (!hlist_unhashed(&bfqq->woken_list_node) && |
| 1712 | &bfqq->woken_list_node != bfqq->waker_bfqq->woken_list.first) { |
| 1713 | hlist_del_init(&bfqq->woken_list_node); |
| 1714 | hlist_add_head(&bfqq->woken_list_node, |
| 1715 | &bfqq->waker_bfqq->woken_list); |
| 1716 | } |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1717 | } |
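/*
 * The last step above is a plain move-to-front: if bfqq is already queued
 * on its waker's woken list but is not the first element, it is unlinked
 * and re-linked at the head. The self-contained sketch below shows the
 * same operation on a toy intrusive singly-linked list; unlike the
 * kernel's hlist, membership is detected with a linear scan, purely for
 * illustration.
 */
#include <stdbool.h>
#include <stddef.h>

struct ex_lnode {
	struct ex_lnode *next;
};

struct ex_llist {
	struct ex_lnode *first;
};

static bool ex_on_list(const struct ex_llist *l, const struct ex_lnode *n)
{
	const struct ex_lnode *p;

	for (p = l->first; p; p = p->next)
		if (p == n)
			return true;

	return false;
}

static void ex_list_del(struct ex_llist *l, struct ex_lnode *n)
{
	struct ex_lnode **pp = &l->first;

	while (*pp && *pp != n)
		pp = &(*pp)->next;
	if (*pp)
		*pp = n->next;
	n->next = NULL;
}

/* Move @n to the head of @l, but only if it is queued and not first yet. */
static void ex_move_to_head(struct ex_llist *l, struct ex_lnode *n)
{
	if (ex_on_list(l, n) && l->first != n) {
		ex_list_del(l, n);
		n->next = l->first;
		l->first = n;
	}
}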