/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

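/*
 * Per-queue container for the registered stat callbacks and the
 * "always account" flag set via blk_stat_enable_accounting().
 */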
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

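/* Reset one bucket; min starts at all-ones so the first min() sample wins */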
static void blk_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/* src is a per-cpu stat, mean isn't initialized */
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

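/* Fold a single sample into a bucket; batch accumulates the running sum */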
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

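/**
 * blk_stat_add() - Record I/O statistics for a completing request.
 * @rq: The request.
 *
 * Derives the request's I/O time from io_start_time_ns, feeds it to the
 * throttling code, and adds one sample to the matching bucket of every
 * currently active callback on the queue, under RCU.
 */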
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	u64 now, value;

	now = ktime_get_ns();
	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
		__blk_stat_add(stat, value);
		put_cpu_ptr(cb->cpu_stat);
	}
	rcu_read_unlock();
}

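/*
 * Timer callback: fold the per-cpu samples gathered during the window into
 * cb->stat, reset the per-cpu buckets, and hand the result to cb->timer_fn.
 */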
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

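/**
 * blk_stat_alloc_callback() - Allocate a block statistics callback.
 * @timer_fn: Timer callback function.
 * @bucket_fn: Bucket callback function.
 * @buckets: Number of statistics buckets.
 * @data: Value for the @data field of the &struct blk_stat_callback.
 *
 * Return: &struct blk_stat_callback on success or NULL on failure.
 */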
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);

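/**
 * blk_stat_add_callback() - Add a block statistics callback to be run on a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * Resets the callback's per-cpu buckets and publishes it on the queue's
 * RCU-protected callback list, enabling stats on the queue.
 */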
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

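/**
 * blk_stat_remove_callback() - Remove a block statistics callback from a
 * request queue.
 * @q: The request queue.
 * @cb: The callback.
 *
 * When this returns, the callback's timer is no longer running. Queue stats
 * are turned off again if no callbacks remain and accounting isn't pinned.
 */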
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

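/**
 * blk_stat_free_callback() - Free a block statistics callback.
 * @cb: The callback, may be NULL.
 *
 * @cb must no longer be on a queue's callback list; the actual freeing is
 * deferred through RCU so concurrent readers of the list stay safe.
 */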
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

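/**
 * blk_stat_enable_accounting() - Permanently enable stats accounting on a
 * queue.
 * @q: The request queue.
 *
 * Once set, QUEUE_FLAG_STATS stays on even when the callback list is empty.
 */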
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

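/* Free the per-queue stats container; all callbacks must already be gone. */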
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}