// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/*
 * src is a per-cpu stat whose mean is never computed: the running sum of
 * its samples lives in src->batch instead.
 */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
			    dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

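/*
 * Worked example (illustrative, not from the original source): merging a
 * per-cpu stat with nr_samples = 3 and batch = 30 into a destination
 * holding mean = 4 over nr_samples = 2 yields
 *
 *	mean = (30 + 4 * 2) / (2 + 3) = 38 / 5 = 7	(div_u64 truncates)
 *	nr_samples = 2 + 3 = 5
 */
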
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

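/*
 * Feed one completed request into every active callback on the queue.
 * @now is the completion time in nanoseconds; the recorded value is the
 * time the request spent on the device (clamped to 0 if the clock went
 * backwards).
 */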
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

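/*
 * Runs when a callback's aggregation window expires: fold each online
 * CPU's buckets into cb->stat[], reset the per-cpu copies for the next
 * window, then hand the aggregated result to the consumer's timer_fn.
 */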
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}
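
/*
 * Illustrative usage sketch (hypothetical consumer, not part of this
 * file): allocate a two-bucket callback that separates reads from other
 * ops, register it on a queue, and arm the aggregation window with
 * blk_stat_activate_msecs() from blk-stat.h. Guarded by #if 0 so it is
 * never built.
 */
#if 0
static int example_bucket_fn(const struct request *rq)
{
	/* Bucket 0 for reads, bucket 1 for everything else. */
	return req_op(rq) == REQ_OP_READ ? 0 : 1;
}

static void example_timer_fn(struct blk_stat_callback *cb)
{
	/* cb->stat[0..1] now holds the aggregated window; consume it here. */
}

static int example_setup(struct request_queue *q)
{
	struct blk_stat_callback *cb;

	cb = blk_stat_alloc_callback(example_timer_fn, example_bucket_fn,
				     2, NULL);
	if (!cb)
		return -ENOMEM;
	blk_stat_add_callback(q, cb);
	blk_stat_activate_msecs(cb, 100);
	return 0;
}
#endif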

void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	/*
	 * Initialise buckets on all possible CPUs, not just online ones:
	 * a CPU that comes online mid-window must start from a clean
	 * state (min = -1ULL, which zeroed per-cpu memory doesn't give).
	 */
	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}

void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

/*
 * Freeing is deferred through RCU because blk_stat_add() may still be
 * walking the callback list under rcu_read_lock().
 */
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

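/*
 * Keep QUEUE_FLAG_STATS set permanently, even while no callbacks are
 * registered: with enable_accounting true, blk_stat_remove_callback()
 * never clears the flag.
 */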
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}