// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}
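
/*
 * Illustrative note: a hot-path update such as bfq_stat_add(&stats->dequeue, 1)
 * touches only the local CPU's counter; percpu_counter_add_batch() folds
 * per-CPU deltas into the shared count in chunks of BLKG_STAT_CPU_BATCH,
 * which is why bfq_stat_read() below must sum across all CPUs to get an
 * accurate value.
 */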

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				    struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
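
/*
 * For reference, BFQG_FLAG_FNS(waiting) above expands to:
 *
 *	static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
 *	{ stats->flags |= (1 << BFQG_stats_waiting); }
 *	static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
 *	{ stats->flags &= ~(1 << BFQG_stats_waiting); }
 *	static int bfqg_stats_waiting(struct bfqg_stats *stats)
 *	{ return (stats->flags & (1 << BFQG_stats_waiting)) != 0; }
 *
 * and likewise for the idling and empty flags.
 */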

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			     now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			     now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq got a
	 * new request in the parent group and moved to this group while
	 * being added to the service tree. Just ignore the event and move
	 * on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				     now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		     blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by making it possible to find the parent of a bfq_group or the
 * bfq_group associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}
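
/*
 * Note: a bfq_queue attached directly to the root group has a NULL
 * parent entity, which is why bfqq_group() above falls back to
 * bfqd->root_group rather than applying container_of() to NULL.
 */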

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

	if (!bfqg)
		return;

	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			 &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
	blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->bytes, gfp) ||
	    blkg_rwstat_init(&stats->ios, gfp))
		return -ENOMEM;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}
#endif

	return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	entity->last_bfqq_created = NULL;

	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, bfqd->queue);
	if (likely(blkg))
		return blkg_to_bfqg(blkg);
	return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg)
{
	struct bfq_group *bfqg, *parent;
	struct bfq_entity *entity;

	bfqg = bfq_lookup_bfqg(bfqd, blkcg);

	if (unlikely(!bfqg))
		return NULL;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		struct bfq_group *curr_bfqg = container_of(entity,
						struct bfq_group, entity);
		if (curr_bfqg != bfqd->root_group) {
			parent = bfqg_parent(curr_bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(curr_bfqg, parent);
		}
	}

	return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/*
	 * Get extra reference to prevent bfqq from being freed in
	 * next possible expire or deactivate.
	 */
	bfqq->ref++;

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st_or_in_serv)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
	/* release extra ref taken above, bfqq may happen to be freed now */
	bfq_put_queue(bfqq);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held; this makes
 * sure that the reference to cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and get a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
						 struct bfq_io_cq *bic,
						 struct blkcg *blkcg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_group *bfqg;
	struct bfq_entity *entity;

	bfqg = bfq_find_set_group(bfqd, blkcg);

	if (unlikely(!bfqg))
		bfqg = bfqd->root_group;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_release_process_ref(bfqd, async_bfqq);
		}
	}

	if (sync_bfqq) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = NULL;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = __bio_blkcg(bio)->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created bic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		goto out;

	bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
out:
	rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move, if entity is a leaf; or the parent entity
 *	    of an active leaf entity to move, if entity is not a leaf.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity,
				     int ioprio_class)
{
	struct bfq_queue *bfqq;
	struct bfq_entity *child_entity = entity;

	while (child_entity->my_sched_data) { /* leaf not reached yet */
		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
		struct bfq_service_tree *child_st = child_sd->service_tree +
			ioprio_class;
		struct rb_root *child_active = &child_st->active;

		child_entity = bfq_entity_of(rb_first(child_active));

		if (!child_entity)
			child_entity = child_sd->in_service_entity;
	}

	bfqq = bfq_entity_to_bfqq(child_entity);
	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_queues - move to the root group all active queues.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree to start the search from.
 */
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity;

	while ((entity = bfq_entity_of(rb_first(active))))
		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity,
					 ioprio_class);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_queues(bfqd, bfqg, st, i);

		/*
		 * The idle tree may still contain bfq_queues
		 * belonging to exited tasks because they never
		 * migrated to a different cgroup from the one being
		 * destroyed now. In addition, even
		 * bfq_reparent_active_queues() may happen to add some
		 * entities to the idle tree. It happens if, in some
		 * of the calls to bfq_bfqq_move() performed by
		 * bfq_reparent_active_queues(), the queue to move is
		 * empty and gets expired.
		 */
		bfq_flush_idle_tree(st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	if (!bfqg->entity.dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

	seq_printf(sf, "default %u\n", bfqgd->weight);
	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
			  &blkcg_policy_bfq, 0, false);
	return 0;
}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
	weight = dev_weight ?: weight;

	bfqg->entity.dev_weight = dev_weight;
	/*
	 * Setting the prio_changed flag of the entity
	 * to 1 with new_weight == weight would re-set
	 * the value of the weight to its ioprio mapping.
	 * Set the flag only if necessary.
	 */
	if ((unsigned short)weight != bfqg->entity.new_weight) {
		bfqg->entity.new_weight = (unsigned short)weight;
		/*
		 * Make sure that the above new value has been
		 * stored in bfqg->entity.new_weight before
		 * setting the prio_changed flag. In fact,
		 * this flag may be read asynchronously (in
		 * critical sections protected by a different
		 * lock than that held here), and finding this
		 * flag set may cause the execution of the code
		 * for updating parameters whose value may
		 * depend also on bfqg->entity.new_weight (in
		 * __bfq_entity_update_weight_prio).
		 * This barrier makes sure that the new value
		 * of bfqg->entity.new_weight is correctly
		 * seen in that code.
		 */
		smp_wmb();
		bfqg->entity.prio_changed = 1;
	}
}
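
/*
 * Illustrative note: bfq_group_set_weight(bfqg, 100, 0) applies the
 * group-wide weight of 100, while a call with a nonzero dev_weight,
 * e.g. bfq_group_set_weight(bfqg, 100, 300), lets the per-device
 * weight of 300 take precedence via the "dev_weight ?: weight"
 * fallback above.
 */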

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val, 0);
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{
	int ret;
	struct blkg_conf_ctx ctx;
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct bfq_group *bfqg;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
	if (ret)
		return ret;

	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* require "default" on dfl */
		ret = -ERANGE;
		if (!v)
			goto out;
	} else if (!strcmp(strim(ctx.body), "default")) {
		v = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	bfqg = blkg_to_bfqg(ctx.blkg);

	ret = -ERANGE;
	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
		ret = 0;
	}
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	char *endp;
	int ret;
	u64 v;

	buf = strim(buf);

	/* "WEIGHT" or "default WEIGHT" sets the default weight */
	v = simple_strtoull(buf, &endp, 0);
	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
		return ret ?: nbytes;
	}

	return bfq_io_set_device_weight(of, buf, nbytes, off);
}
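
/*
 * Illustrative usage from userspace, assuming the cgroup v2 interface
 * file for this handler is exposed as io.bfq.weight:
 *
 *	echo 300 > io.bfq.weight		# set the default weight
 *	echo "default 300" > io.bfq.weight	# equivalent form
 *	echo "8:0 500" > io.bfq.weight		# per-device weight for 8:0
 *	echo "8:0 default" > io.bfq.weight	# clear the per-device weight
 *
 * The first two forms take the legacy path above; MAJ:MIN forms are
 * parsed by blkg_conf_prep() and handled by bfq_io_set_device_weight().
 */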
| 1104 | |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1105 | static int bfqg_print_rwstat(struct seq_file *sf, void *v) |
| 1106 | { |
| 1107 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat, |
| 1108 | &blkcg_policy_bfq, seq_cft(sf)->private, true); |
| 1109 | return 0; |
| 1110 | } |
| 1111 | |
Tejun Heo | a557f1c | 2019-11-07 11:17:59 -0800 | [diff] [blame] | 1112 | static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf, |
| 1113 | struct blkg_policy_data *pd, int off) |
| 1114 | { |
| 1115 | struct blkg_rwstat_sample sum; |
| 1116 | |
| 1117 | blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum); |
| 1118 | return __blkg_prfill_rwstat(sf, pd, &sum); |
| 1119 | } |
| 1120 | |
| 1121 | static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v) |
| 1122 | { |
| 1123 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), |
| 1124 | bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq, |
| 1125 | seq_cft(sf)->private, true); |
| 1126 | return 0; |
| 1127 | } |
| 1128 | |
Tejun Heo | fd41e60 | 2019-11-07 11:18:00 -0800 | [diff] [blame] | 1129 | #ifdef CONFIG_BFQ_CGROUP_DEBUG |
Tejun Heo | a557f1c | 2019-11-07 11:17:59 -0800 | [diff] [blame] | 1130 | static int bfqg_print_stat(struct seq_file *sf, void *v) |
| 1131 | { |
| 1132 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat, |
| 1133 | &blkcg_policy_bfq, seq_cft(sf)->private, false); |
| 1134 | return 0; |
| 1135 | } |
| 1136 | |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1137 | static u64 bfqg_prfill_stat_recursive(struct seq_file *sf, |
| 1138 | struct blkg_policy_data *pd, int off) |
| 1139 | { |
Christoph Hellwig | d625898 | 2019-06-06 12:26:23 +0200 | [diff] [blame] | 1140 | struct blkcg_gq *blkg = pd_to_blkg(pd); |
| 1141 | struct blkcg_gq *pos_blkg; |
| 1142 | struct cgroup_subsys_state *pos_css; |
| 1143 | u64 sum = 0; |
| 1144 | |
| 1145 | lockdep_assert_held(&blkg->q->queue_lock); |
| 1146 | |
| 1147 | rcu_read_lock(); |
| 1148 | blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) { |
| 1149 | struct bfq_stat *stat; |
| 1150 | |
| 1151 | if (!pos_blkg->online) |
| 1152 | continue; |
| 1153 | |
| 1154 | stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off; |
| 1155 | sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt); |
| 1156 | } |
| 1157 | rcu_read_unlock(); |
| 1158 | |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1159 | return __blkg_prfill_u64(sf, pd, sum); |
| 1160 | } |
| 1161 | |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1162 | static int bfqg_print_stat_recursive(struct seq_file *sf, void *v) |
| 1163 | { |
| 1164 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), |
| 1165 | bfqg_prfill_stat_recursive, &blkcg_policy_bfq, |
| 1166 | seq_cft(sf)->private, false); |
| 1167 | return 0; |
| 1168 | } |
| 1169 | |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1170 | static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd, |
| 1171 | int off) |
| 1172 | { |
Tejun Heo | fd41e60 | 2019-11-07 11:18:00 -0800 | [diff] [blame] | 1173 | struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg); |
| 1174 | u64 sum = blkg_rwstat_total(&bfqg->stats.bytes); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1175 | |
 | 1176 | 	return __blkg_prfill_u64(sf, pd, sum >> 9);	/* bytes -> 512 B sectors */ 
| 1177 | } |
| 1178 | |
| 1179 | static int bfqg_print_stat_sectors(struct seq_file *sf, void *v) |
| 1180 | { |
| 1181 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), |
| 1182 | bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false); |
| 1183 | return 0; |
| 1184 | } |
| 1185 | |
| 1186 | static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf, |
| 1187 | struct blkg_policy_data *pd, int off) |
| 1188 | { |
Christoph Hellwig | 7af6fd9 | 2019-06-06 12:26:21 +0200 | [diff] [blame] | 1189 | struct blkg_rwstat_sample tmp; |
Christoph Hellwig | 5d0b6e4 | 2019-06-06 12:26:20 +0200 | [diff] [blame] | 1190 | |
Tejun Heo | fd41e60 | 2019-11-07 11:18:00 -0800 | [diff] [blame] | 1191 | blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq, |
| 1192 | offsetof(struct bfq_group, stats.bytes), &tmp); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1193 | |
Christoph Hellwig | 7af6fd9 | 2019-06-06 12:26:21 +0200 | [diff] [blame] | 1194 | return __blkg_prfill_u64(sf, pd, |
| 1195 | (tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1196 | } |
| 1197 | |
| 1198 | static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v) |
| 1199 | { |
| 1200 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), |
| 1201 | bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0, |
| 1202 | false); |
| 1203 | return 0; |
| 1204 | } |
| 1205 | |
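/*
 * avg_queue_size is a plain arithmetic mean: the accumulated sum of
 * observed queue depths divided by the number of samples taken, with 0
 * reported until the first sample. E.g. (path illustrative):
 *
 *   cat /sys/fs/cgroup/blkio/grp/blkio.bfq.avg_queue_size
 */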
| 1206 | static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf, |
| 1207 | struct blkg_policy_data *pd, int off) |
| 1208 | { |
| 1209 | struct bfq_group *bfqg = pd_to_bfqg(pd); |
Christoph Hellwig | c0ce79dc | 2019-06-06 12:26:22 +0200 | [diff] [blame] | 1210 | u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1211 | u64 v = 0; |
| 1212 | |
| 1213 | if (samples) { |
Christoph Hellwig | c0ce79dc | 2019-06-06 12:26:22 +0200 | [diff] [blame] | 1214 | v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum); |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1215 | v = div64_u64(v, samples); |
| 1216 | } |
| 1217 | __blkg_prfill_u64(sf, pd, v); |
| 1218 | return 0; |
| 1219 | } |
| 1220 | |
| 1221 | /* print avg_queue_size */ |
| 1222 | static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v) |
| 1223 | { |
| 1224 | blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), |
| 1225 | bfqg_prfill_avg_queue_size, &blkcg_policy_bfq, |
| 1226 | 0, false); |
| 1227 | return 0; |
| 1228 | } |
Christoph Hellwig | 8060c47 | 2019-06-06 12:26:24 +0200 | [diff] [blame] | 1229 | #endif /* CONFIG_BFQ_CGROUP_DEBUG */ |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1230 | |
| 1231 | struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) |
| 1232 | { |
| 1233 | int ret; |
| 1234 | |
| 1235 | ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq); |
| 1236 | if (ret) |
| 1237 | return NULL; |
| 1238 | |
| 1239 | return blkg_to_bfqg(bfqd->queue->root_blkg); |
| 1240 | } |
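/*
 * blkcg_activate_policy() allocates and initializes a bfq_group (via
 * the pd_alloc/pd_init callbacks below) for every blkg already attached
 * to this queue; the policy data of the queue's root blkg then acts as
 * BFQ's root group.
 */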
| 1241 | |
| 1242 | struct blkcg_policy blkcg_policy_bfq = { |
| 1243 | .dfl_cftypes = bfq_blkg_files, |
| 1244 | .legacy_cftypes = bfq_blkcg_legacy_files, |
| 1245 | |
| 1246 | .cpd_alloc_fn = bfq_cpd_alloc, |
| 1247 | .cpd_init_fn = bfq_cpd_init, |
| 1248 | .cpd_bind_fn = bfq_cpd_init, |
| 1249 | .cpd_free_fn = bfq_cpd_free, |
| 1250 | |
| 1251 | .pd_alloc_fn = bfq_pd_alloc, |
| 1252 | .pd_init_fn = bfq_pd_init, |
| 1253 | .pd_offline_fn = bfq_pd_offline, |
| 1254 | .pd_free_fn = bfq_pd_free, |
| 1255 | .pd_reset_stats_fn = bfq_pd_reset_stats, |
| 1256 | }; |
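/*
 * The cpd_* callbacks manage per-blkcg state such as the default
 * weight, while the pd_* callbacks manage the per-(cgroup, queue)
 * bfq_group instances. Note that cpd_bind_fn reuses bfq_cpd_init, so
 * the per-blkcg defaults are (re)applied when the controller is bound
 * to a hierarchy.
 */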
| 1257 | |
| 1258 | struct cftype bfq_blkcg_legacy_files[] = { |
| 1259 | { |
| 1260 | .name = "bfq.weight", |
Jens Axboe | cf89298 | 2019-06-10 03:35:41 -0600 | [diff] [blame] | 1261 | .flags = CFTYPE_NOT_ON_ROOT, |
Fam Zheng | 795fe54 | 2019-08-28 11:54:53 +0800 | [diff] [blame] | 1262 | .seq_show = bfq_io_show_weight_legacy, |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1263 | .write_u64 = bfq_io_set_weight_legacy, |
| 1264 | }, |
Fam Zheng | 795fe54 | 2019-08-28 11:54:53 +0800 | [diff] [blame] | 1265 | { |
| 1266 | .name = "bfq.weight_device", |
| 1267 | .flags = CFTYPE_NOT_ON_ROOT, |
| 1268 | .seq_show = bfq_io_show_weight, |
| 1269 | .write = bfq_io_set_weight, |
| 1270 | }, |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1271 | |
 | 1272 | 	/* statistics covering only the tasks in the bfqg */ 
| 1273 | { |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1274 | .name = "bfq.io_service_bytes", |
Tejun Heo | fd41e60 | 2019-11-07 11:18:00 -0800 | [diff] [blame] | 1275 | .private = offsetof(struct bfq_group, stats.bytes), |
| 1276 | .seq_show = bfqg_print_rwstat, |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1277 | }, |
| 1278 | { |
| 1279 | .name = "bfq.io_serviced", |
Tejun Heo | fd41e60 | 2019-11-07 11:18:00 -0800 | [diff] [blame] | 1280 | .private = offsetof(struct bfq_group, stats.ios), |
| 1281 | .seq_show = bfqg_print_rwstat, |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1282 | }, |
Christoph Hellwig | 8060c47 | 2019-06-06 12:26:24 +0200 | [diff] [blame] | 1283 | #ifdef CONFIG_BFQ_CGROUP_DEBUG |
Luca Miccio | a33801e | 2017-11-13 07:34:10 +0100 | [diff] [blame] | 1284 | { |
| 1285 | .name = "bfq.time", |
| 1286 | .private = offsetof(struct bfq_group, stats.time), |
| 1287 | .seq_show = bfqg_print_stat, |
| 1288 | }, |
| 1289 | { |
| 1290 | .name = "bfq.sectors", |
| 1291 | .seq_show = bfqg_print_stat_sectors, |
| 1292 | }, |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1293 | { |
| 1294 | .name = "bfq.io_service_time", |
| 1295 | .private = offsetof(struct bfq_group, stats.service_time), |
| 1296 | .seq_show = bfqg_print_rwstat, |
| 1297 | }, |
| 1298 | { |
| 1299 | .name = "bfq.io_wait_time", |
| 1300 | .private = offsetof(struct bfq_group, stats.wait_time), |
| 1301 | .seq_show = bfqg_print_rwstat, |
| 1302 | }, |
| 1303 | { |
| 1304 | .name = "bfq.io_merged", |
| 1305 | .private = offsetof(struct bfq_group, stats.merged), |
| 1306 | .seq_show = bfqg_print_rwstat, |
| 1307 | }, |
| 1308 | { |
| 1309 | .name = "bfq.io_queued", |
| 1310 | .private = offsetof(struct bfq_group, stats.queued), |
| 1311 | .seq_show = bfqg_print_rwstat, |
| 1312 | }, |
Christoph Hellwig | 8060c47 | 2019-06-06 12:26:24 +0200 | [diff] [blame] | 1313 | #endif /* CONFIG_BFQ_CGROUP_DEBUG */ |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1314 | |
Angelo Ruocco | 636b8fe | 2019-04-08 17:35:34 +0200 | [diff] [blame] | 1315 | 	/* the same statistics, but covering the bfqg and its descendants */ 
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1316 | { |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1317 | .name = "bfq.io_service_bytes_recursive", |
Tejun Heo | fd41e60 | 2019-11-07 11:18:00 -0800 | [diff] [blame] | 1318 | .private = offsetof(struct bfq_group, stats.bytes), |
| 1319 | .seq_show = bfqg_print_rwstat_recursive, |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1320 | }, |
| 1321 | { |
| 1322 | .name = "bfq.io_serviced_recursive", |
Tejun Heo | fd41e60 | 2019-11-07 11:18:00 -0800 | [diff] [blame] | 1323 | .private = offsetof(struct bfq_group, stats.ios), |
| 1324 | .seq_show = bfqg_print_rwstat_recursive, |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1325 | }, |
Christoph Hellwig | 8060c47 | 2019-06-06 12:26:24 +0200 | [diff] [blame] | 1326 | #ifdef CONFIG_BFQ_CGROUP_DEBUG |
Luca Miccio | a33801e | 2017-11-13 07:34:10 +0100 | [diff] [blame] | 1327 | { |
| 1328 | .name = "bfq.time_recursive", |
| 1329 | .private = offsetof(struct bfq_group, stats.time), |
| 1330 | .seq_show = bfqg_print_stat_recursive, |
| 1331 | }, |
| 1332 | { |
| 1333 | .name = "bfq.sectors_recursive", |
| 1334 | .seq_show = bfqg_print_stat_sectors_recursive, |
| 1335 | }, |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1336 | { |
| 1337 | .name = "bfq.io_service_time_recursive", |
| 1338 | .private = offsetof(struct bfq_group, stats.service_time), |
| 1339 | .seq_show = bfqg_print_rwstat_recursive, |
| 1340 | }, |
| 1341 | { |
| 1342 | .name = "bfq.io_wait_time_recursive", |
| 1343 | .private = offsetof(struct bfq_group, stats.wait_time), |
| 1344 | .seq_show = bfqg_print_rwstat_recursive, |
| 1345 | }, |
| 1346 | { |
| 1347 | .name = "bfq.io_merged_recursive", |
| 1348 | .private = offsetof(struct bfq_group, stats.merged), |
| 1349 | .seq_show = bfqg_print_rwstat_recursive, |
| 1350 | }, |
| 1351 | { |
| 1352 | .name = "bfq.io_queued_recursive", |
| 1353 | .private = offsetof(struct bfq_group, stats.queued), |
| 1354 | .seq_show = bfqg_print_rwstat_recursive, |
| 1355 | }, |
| 1356 | { |
| 1357 | .name = "bfq.avg_queue_size", |
| 1358 | .seq_show = bfqg_print_avg_queue_size, |
| 1359 | }, |
| 1360 | { |
| 1361 | .name = "bfq.group_wait_time", |
| 1362 | .private = offsetof(struct bfq_group, stats.group_wait_time), |
| 1363 | .seq_show = bfqg_print_stat, |
| 1364 | }, |
| 1365 | { |
| 1366 | .name = "bfq.idle_time", |
| 1367 | .private = offsetof(struct bfq_group, stats.idle_time), |
| 1368 | .seq_show = bfqg_print_stat, |
| 1369 | }, |
| 1370 | { |
| 1371 | .name = "bfq.empty_time", |
| 1372 | .private = offsetof(struct bfq_group, stats.empty_time), |
| 1373 | .seq_show = bfqg_print_stat, |
| 1374 | }, |
| 1375 | { |
| 1376 | .name = "bfq.dequeue", |
| 1377 | .private = offsetof(struct bfq_group, stats.dequeue), |
| 1378 | .seq_show = bfqg_print_stat, |
| 1379 | }, |
Christoph Hellwig | 8060c47 | 2019-06-06 12:26:24 +0200 | [diff] [blame] | 1380 | #endif /* CONFIG_BFQ_CGROUP_DEBUG */ |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1381 | { } /* terminate */ |
| 1382 | }; |
| 1383 | |
| 1384 | struct cftype bfq_blkg_files[] = { |
| 1385 | { |
| 1386 | .name = "bfq.weight", |
Jens Axboe | cf89298 | 2019-06-10 03:35:41 -0600 | [diff] [blame] | 1387 | .flags = CFTYPE_NOT_ON_ROOT, |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1388 | .seq_show = bfq_io_show_weight, |
| 1389 | .write = bfq_io_set_weight, |
| 1390 | }, |
 | 1391 | 	{ }	/* terminate */ 
| 1392 | }; |
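/*
 * On the unified (v2) hierarchy only the weight file is exposed (the
 * debug statistics above are legacy-only); since it is backed by
 * bfq_io_set_weight, it accepts the same formats as the two legacy
 * weight files combined. E.g. (path illustrative):
 *
 *   echo 300 > /sys/fs/cgroup/grp/io.bfq.weight
 */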
| 1393 | |
| 1394 | #else /* CONFIG_BFQ_GROUP_IOSCHED */ |
| 1395 | |
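/*
 * Fallbacks for kernels built without BFQ cgroup support: every queue
 * maps to the single root group and the cgroup hooks collapse into
 * no-ops.
 */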
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1396 | void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
| 1397 | struct bfq_group *bfqg) {} |
| 1398 | |
| 1399 | void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg) |
| 1400 | { |
| 1401 | struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); |
| 1402 | |
| 1403 | entity->weight = entity->new_weight; |
| 1404 | entity->orig_weight = entity->new_weight; |
| 1405 | if (bfqq) { |
| 1406 | bfqq->ioprio = bfqq->new_ioprio; |
| 1407 | bfqq->ioprio_class = bfqq->new_ioprio_class; |
| 1408 | } |
| 1409 | entity->sched_data = &bfqg->sched_data; |
| 1410 | } |
| 1411 | |
| 1412 | void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {} |
| 1413 | |
| 1414 | void bfq_end_wr_async(struct bfq_data *bfqd) |
| 1415 | { |
| 1416 | bfq_end_wr_async_queues(bfqd, bfqd->root_group); |
| 1417 | } |
| 1418 | |
| 1419 | struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg) |
| 1420 | { |
| 1421 | return bfqd->root_group; |
| 1422 | } |
| 1423 | |
| 1424 | struct bfq_group *bfqq_group(struct bfq_queue *bfqq) |
| 1425 | { |
| 1426 | return bfqq->bfqd->root_group; |
| 1427 | } |
| 1428 | |
Paolo Valente | 4d8340d | 2020-02-03 11:40:58 +0100 | [diff] [blame] | 1429 | void bfqg_and_blkg_get(struct bfq_group *bfqg) {} |
| 1430 | |
| 1431 | void bfqg_and_blkg_put(struct bfq_group *bfqg) {} |
| 1432 | |
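/*
 * Without cgroup support the root group created below is the only group
 * in the system: a single kmalloc()'ed object holding one service tree
 * per I/O-priority class (RT, BE and IDLE).
 */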
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1433 | struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) |
| 1434 | { |
| 1435 | struct bfq_group *bfqg; |
| 1436 | int i; |
| 1437 | |
| 1438 | bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); |
| 1439 | if (!bfqg) |
| 1440 | return NULL; |
| 1441 | |
| 1442 | for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) |
| 1443 | bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; |
| 1444 | |
| 1445 | return bfqg; |
| 1446 | } |
| 1447 | #endif /* CONFIG_BFQ_GROUP_IOSCHED */ |