/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"

/* Flush a partial batch into the running stats after this many samples. */
#define BLK_RQ_STAT_BATCH	64

/*
 * Per-queue state: the list of registered stat callbacks. Writers take the
 * lock; blk_stat_add() walks the list under RCU.
 */
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
};

/*
 * Bucket requests by data direction: rq_data_dir() returns 0 for reads and
 * 1 for writes, so this works as a bucket_fn for a two-bucket callback.
 */
unsigned int blk_stat_rq_ddir(const struct request *rq)
{
	return rq_data_dir(rq);
}
EXPORT_SYMBOL_GPL(blk_stat_rq_ddir);
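
/*
 * A bucket_fn maps a request to a bucket index below the callback's bucket
 * count; blk_stat_rq_ddir() above is the stock two-bucket version. As a
 * rough sketch (not part of this file), a consumer could instead bucket by
 * sync vs. async, assuming the rq_is_sync() helper from blkdev.h:
 *
 *	static unsigned int my_sync_bucket_fn(const struct request *rq)
 *	{
 *		return rq_is_sync(rq) ? 1 : 0;
 *	}
 *
 * and pass it to blk_stat_alloc_callback() with buckets == 2.
 */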

static void blk_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;	/* so the first sample always updates min */
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
}

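/*
 * Samples are accumulated in stat->batch/nr_batch and folded into the
 * running mean in one go, so the 64-bit division happens once per flush
 * rather than once per sample. __blk_stat_add() below triggers a flush when
 * the batch accumulator would wrap or once BLK_RQ_STAT_BATCH samples have
 * been gathered:
 *
 *	mean' = (mean * nr_samples + batch) / (nr_samples + nr_batch)
 */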
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples) {
		stat->mean = div64_s64(stat->batch, nr_batch);
	} else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
				       nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}

static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples) {
		dst->mean = src->mean;
	} else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
				      dst->nr_samples + src->nr_samples);
	}

	dst->nr_samples += src->nr_samples;
}

static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);

	/*
	 * Flush if adding value would wrap the batch accumulator, or if the
	 * batch has reached BLK_RQ_STAT_BATCH samples.
	 */
	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}

/*
 * Record a completion sample for @rq: the time from issue to now is added
 * to the matching per-CPU bucket of every active callback on the queue.
 */
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	value = now - blk_stat_time(&rq->issue_stat);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (blk_stat_is_active(cb)) {
			bucket = cb->bucket_fn(rq);
			stat = &this_cpu_ptr(cb->cpu_stat)[bucket];
			__blk_stat_add(stat, value);
		}
	}
	rcu_read_unlock();
}

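/*
 * Runs from the callback's timer: fold each online CPU's buckets into
 * cb->stat[], reset the per-CPU copies, and hand the aggregated window to
 * the owner's timer_fn. Note that blk_stat_add_callback() initializes the
 * buckets for every possible CPU, while only online CPUs are folded here.
 */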
static void blk_stat_timer_fn(unsigned long data)
{
	struct blk_stat_callback *cb = (void *)data;
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

/*
 * Allocate a callback with @buckets stat buckets. @timer_fn is invoked from
 * the callback's timer with the aggregated buckets in cb->stat[]; @bucket_fn
 * maps a request to the bucket it should be accounted in; @data is for the
 * caller's private use.
 */
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			unsigned int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);
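
/*
 * A minimal sketch of the intended lifecycle, assuming the owner arms the
 * window timer via the blk_stat_activate_msecs() helper from blk-stat.h (or
 * by arming cb->timer directly with mod_timer()); my_timer_fn and my_data
 * below are hypothetical:
 *
 *	static void my_timer_fn(struct blk_stat_callback *cb)
 *	{
 *		// cb->stat[0] and cb->stat[1] hold the aggregated read and
 *		// write stats for the window that just ended
 *	}
 *
 *	cb = blk_stat_alloc_callback(my_timer_fn, blk_stat_rq_ddir, 2, my_data);
 *	blk_stat_add_callback(q, cb);
 *	blk_stat_activate_msecs(cb, 100);	// collect a 100ms window
 *	...
 *	blk_stat_remove_callback(q, cb);
 *	blk_stat_free_callback(cb);
 */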

/*
 * Publish @cb on @q: reset its per-CPU buckets and add it to the queue's
 * callback list. QUEUE_FLAG_STATS marks the queue as having stat consumers.
 */
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

/*
 * Unpublish @cb and stop its timer. Readers still inside an RCU section may
 * see the callback briefly, which is why freeing it is deferred to
 * blk_stat_free_callback().
 */
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks))
		clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

/*
 * Free @cb after an RCU grace period, so that a concurrent blk_stat_add()
 * walking the callback list cannot touch freed memory.
 */
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}