/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"

#define BLK_RQ_STAT_BATCH	64

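/*
 * Fold the accumulated batch into the running statistics. Samples are
 * first summed in ->batch/->nr_batch and only merged into the running
 * mean here, which keeps the 64-bit division off the per-IO fast path.
 */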
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
	const s32 nr_batch = READ_ONCE(stat->nr_batch);
	const s32 nr_samples = READ_ONCE(stat->nr_samples);

	if (!nr_batch)
		return;
	if (!nr_samples) {
		stat->mean = div64_s64(stat->batch, nr_batch);
	} else {
		stat->mean = div64_s64((stat->mean * nr_samples) +
					stat->batch,
					nr_batch + nr_samples);
	}

	stat->nr_samples += nr_batch;
	stat->nr_batch = stat->batch = 0;
}

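/*
 * Merge the samples in @src into @dst, flushing any pending batch in
 * @src first. The combined mean is weighted by the sample counts on
 * both sides.
 */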
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	if (!dst->nr_samples) {
		dst->mean = src->mean;
	} else {
		dst->mean = div64_s64((src->mean * src->nr_samples) +
					(dst->mean * dst->nr_samples),
					dst->nr_samples + src->nr_samples);
	}
	dst->nr_samples += src->nr_samples;
}

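/*
 * Gather queue-wide stats by summing the per-software-queue stats of
 * every hardware queue. Only entries stamped with the newest time
 * window are merged; if the window moves while we scan, no entry
 * matches and we simply loop and rescan.
 */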
static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	uint64_t latest = 0;
	int i, j, nr;

	blk_stat_init(&dst[READ]);
	blk_stat_init(&dst[WRITE]);

	nr = 0;
	do {
		uint64_t newest = 0;

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				blk_stat_flush_batch(&ctx->stat[READ]);
				blk_stat_flush_batch(&ctx->stat[WRITE]);

				if (!ctx->stat[READ].nr_samples &&
				    !ctx->stat[WRITE].nr_samples)
					continue;
				if (ctx->stat[READ].time > newest)
					newest = ctx->stat[READ].time;
				if (ctx->stat[WRITE].time > newest)
					newest = ctx->stat[WRITE].time;
			}
		}

		/*
		 * No samples in any software queue, we're done.
		 */
		if (!newest)
			break;

		if (newest > latest)
			latest = newest;

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				if (ctx->stat[READ].time == newest) {
					blk_stat_sum(&dst[READ],
						     &ctx->stat[READ]);
					nr++;
				}
				if (ctx->stat[WRITE].time == newest) {
					blk_stat_sum(&dst[WRITE],
						     &ctx->stat[WRITE]);
					nr++;
				}
			}
		}
		/*
		 * If we race on finding an entry, just loop back again.
		 * Should be very rare.
		 */
	} while (!nr);

	dst[READ].time = dst[WRITE].time = latest;
}

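/*
 * Fill @dst (indexed by READ/WRITE) with the current stats for @q,
 * covering both the blk-mq and legacy request paths.
 */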
void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
{
	if (q->mq_ops) {
		blk_mq_stat_get(q, dst);
	} else {
		blk_stat_flush_batch(&q->rq_stats[READ]);
		blk_stat_flush_batch(&q->rq_stats[WRITE]);
		memcpy(&dst[READ], &q->rq_stats[READ],
			sizeof(struct blk_rq_stat));
		memcpy(&dst[WRITE], &q->rq_stats[WRITE],
			sizeof(struct blk_rq_stat));
	}
}

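/*
 * Sum the stats of every software queue belonging to @hctx into @dst.
 * Unlike blk_mq_stat_get(), this does not reinitialize @dst, so the
 * caller is expected to have done that.
 */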
void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
{
	struct blk_mq_ctx *ctx;
	unsigned int i, nr;

	nr = 0;
	do {
		uint64_t newest = 0;

		hctx_for_each_ctx(hctx, ctx, i) {
			blk_stat_flush_batch(&ctx->stat[READ]);
			blk_stat_flush_batch(&ctx->stat[WRITE]);

			if (!ctx->stat[READ].nr_samples &&
			    !ctx->stat[WRITE].nr_samples)
				continue;

			if (ctx->stat[READ].time > newest)
				newest = ctx->stat[READ].time;
			if (ctx->stat[WRITE].time > newest)
				newest = ctx->stat[WRITE].time;
		}

		if (!newest)
			break;

		hctx_for_each_ctx(hctx, ctx, i) {
			if (ctx->stat[READ].time == newest) {
				blk_stat_sum(&dst[READ], &ctx->stat[READ]);
				nr++;
			}
			if (ctx->stat[WRITE].time == newest) {
				blk_stat_sum(&dst[WRITE], &ctx->stat[WRITE]);
				nr++;
			}
		}
		/*
		 * If we race on finding an entry, just loop back again.
		 * Should be very rare, as the window is only updated
		 * occasionally.
		 */
	} while (!nr);
}

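/*
 * Reset @stat and stamp it with the stats window that @time_now falls
 * into: BLK_STAT_NSEC_MASK (from blk-stat.h) clears the sub-window
 * bits, so all samples taken within one window compare equal on
 * ->time.
 */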
static void __blk_stat_init(struct blk_rq_stat *stat, s64 time_now)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = stat->nr_batch = 0;
	stat->time = time_now & BLK_STAT_NSEC_MASK;
}

void blk_stat_init(struct blk_rq_stat *stat)
{
	__blk_stat_init(stat, ktime_to_ns(ktime_get()));
}

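/*
 * A stat is "current" if it was last initialized in the same time
 * window as @now.
 */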
static bool __blk_stat_is_current(struct blk_rq_stat *stat, s64 now)
{
	return (now & BLK_STAT_NSEC_MASK) == (stat->time & BLK_STAT_NSEC_MASK);
}

bool blk_stat_is_current(struct blk_rq_stat *stat)
{
	return __blk_stat_is_current(stat, ktime_to_ns(ktime_get()));
}

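/*
 * Account the completion latency of @rq in @stat. Values are batched;
 * the batch is flushed into the running statistics once it is full or
 * if adding the new value would overflow the batch accumulator.
 */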
void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
{
	s64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	if (!__blk_stat_is_current(stat, now))
		__blk_stat_init(stat, now);

	value = now - blk_stat_time(&rq->issue_stat);
	if (value > stat->max)
		stat->max = value;
	if (value < stat->min)
		stat->min = value;

	if (stat->batch + value < stat->batch ||
	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
		blk_stat_flush_batch(stat);

	stat->batch += value;
	stat->nr_batch++;
}

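/*
 * Reset all stats for @q, on both the blk-mq and legacy paths.
 */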
void blk_stat_clear(struct request_queue *q)
{
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		struct blk_mq_ctx *ctx;
		int i, j;

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				blk_stat_init(&ctx->stat[READ]);
				blk_stat_init(&ctx->stat[WRITE]);
			}
		}
	} else {
		blk_stat_init(&q->rq_stats[READ]);
		blk_stat_init(&q->rq_stats[WRITE]);
	}
}

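/*
 * Stamp the current time into the issue stat of a request, packing it
 * into the bits covered by BLK_STAT_TIME_MASK while preserving whatever
 * is stored in the BLK_STAT_MASK bits.
 */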
void blk_stat_set_issue_time(struct blk_issue_stat *stat)
{
	stat->time = (stat->time & BLK_STAT_MASK) |
			(ktime_to_ns(ktime_get()) & BLK_STAT_TIME_MASK);
}

/*
 * Enable stat tracking, return whether it was already enabled
 */
bool blk_stat_enable(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
		return false;
	}

	return true;
}