// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs iostat support
 *
 * Copyright 2021 Google LLC
 * Author: Daeho Jeong <daehojeong@google.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>

#include "f2fs.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

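/*
 * Per-bio iostat contexts come from a dedicated slab cache backed by a
 * mempool, so binding a context to a bio at submission time never fails
 * outright (see iostat_alloc_and_bind_ctx() below).
 */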
#define NUM_PREALLOC_IOSTAT_CTXS	128
static struct kmem_cache *bio_iostat_ctx_cache;
static mempool_t *bio_iostat_ctx_pool;

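/*
 * Dump the cumulative per-type byte counters in sbi->rw_iostat to a
 * seq_file; the seq_file itself is wired up elsewhere in f2fs (the
 * per-device iostat_info entry). Nothing is printed while iostat is
 * disabled.
 */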
int __maybe_unused iostat_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	time64_t now = ktime_get_real_seconds();

	if (!sbi->iostat_enable)
		return 0;

	seq_printf(seq, "time:		%-16llu\n", now);

	/* print app write IOs */
	seq_puts(seq, "[WRITE]\n");
	seq_printf(seq, "app buffered:	%-16llu\n",
				sbi->rw_iostat[APP_BUFFERED_IO]);
	seq_printf(seq, "app direct:	%-16llu\n",
				sbi->rw_iostat[APP_DIRECT_IO]);
	seq_printf(seq, "app mapped:	%-16llu\n",
				sbi->rw_iostat[APP_MAPPED_IO]);

	/* print fs write IOs */
	seq_printf(seq, "fs data:	%-16llu\n",
				sbi->rw_iostat[FS_DATA_IO]);
	seq_printf(seq, "fs node:	%-16llu\n",
				sbi->rw_iostat[FS_NODE_IO]);
	seq_printf(seq, "fs meta:	%-16llu\n",
				sbi->rw_iostat[FS_META_IO]);
	seq_printf(seq, "fs gc data:	%-16llu\n",
				sbi->rw_iostat[FS_GC_DATA_IO]);
	seq_printf(seq, "fs gc node:	%-16llu\n",
				sbi->rw_iostat[FS_GC_NODE_IO]);
	seq_printf(seq, "fs cp data:	%-16llu\n",
				sbi->rw_iostat[FS_CP_DATA_IO]);
	seq_printf(seq, "fs cp node:	%-16llu\n",
				sbi->rw_iostat[FS_CP_NODE_IO]);
	seq_printf(seq, "fs cp meta:	%-16llu\n",
				sbi->rw_iostat[FS_CP_META_IO]);

	/* print app read IOs */
	seq_puts(seq, "[READ]\n");
	seq_printf(seq, "app buffered:	%-16llu\n",
				sbi->rw_iostat[APP_BUFFERED_READ_IO]);
	seq_printf(seq, "app direct:	%-16llu\n",
				sbi->rw_iostat[APP_DIRECT_READ_IO]);
	seq_printf(seq, "app mapped:	%-16llu\n",
				sbi->rw_iostat[APP_MAPPED_READ_IO]);

	/* print fs read IOs */
	seq_printf(seq, "fs data:	%-16llu\n",
				sbi->rw_iostat[FS_DATA_READ_IO]);
	seq_printf(seq, "fs gc data:	%-16llu\n",
				sbi->rw_iostat[FS_GDATA_READ_IO]);
	seq_printf(seq, "fs compr_data:	%-16llu\n",
				sbi->rw_iostat[FS_CDATA_READ_IO]);
	seq_printf(seq, "fs node:	%-16llu\n",
				sbi->rw_iostat[FS_NODE_READ_IO]);
	seq_printf(seq, "fs meta:	%-16llu\n",
				sbi->rw_iostat[FS_META_READ_IO]);

	/* print other IOs */
	seq_puts(seq, "[OTHER]\n");
	seq_printf(seq, "fs discard:	%-16llu\n",
				sbi->rw_iostat[FS_DISCARD]);

	return 0;
}

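/*
 * Snapshot the latency accumulators for every (request kind, page type)
 * pair, convert them from jiffies to milliseconds, reset them under
 * iostat_lat_lock, and hand the snapshot to the f2fs_iostat_latency
 * tracepoint.
 */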
static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
{
	int io, idx = 0;
	unsigned int cnt;
	struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE];
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;

	spin_lock_irq(&sbi->iostat_lat_lock);
	for (idx = 0; idx < MAX_IO_TYPE; idx++) {
		for (io = 0; io < NR_PAGE_TYPE; io++) {
			cnt = io_lat->bio_cnt[idx][io];
			iostat_lat[idx][io].peak_lat =
				jiffies_to_msecs(io_lat->peak_lat[idx][io]);
			iostat_lat[idx][io].cnt = cnt;
			iostat_lat[idx][io].avg_lat = cnt ?
				jiffies_to_msecs(io_lat->sum_lat[idx][io]) / cnt : 0;
			io_lat->sum_lat[idx][io] = 0;
			io_lat->peak_lat[idx][io] = 0;
			io_lat->bio_cnt[idx][io] = 0;
		}
	}
	spin_unlock_irq(&sbi->iostat_lat_lock);

	trace_f2fs_iostat_latency(sbi, iostat_lat);
}

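/*
 * Periodically emit the byte counters through the f2fs_iostat tracepoint.
 * The period is checked twice (lockless fast path, then again under
 * iostat_lock) so only one caller per window computes the deltas against
 * prev_rw_iostat; the latency table is flushed right after.
 */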
static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
{
	unsigned long long iostat_diff[NR_IO_TYPE];
	int i;

	if (time_is_after_jiffies(sbi->iostat_next_period))
		return;

	/* Need to double-check under the lock */
	spin_lock(&sbi->iostat_lock);
	if (time_is_after_jiffies(sbi->iostat_next_period)) {
		spin_unlock(&sbi->iostat_lock);
		return;
	}
	sbi->iostat_next_period = jiffies +
		msecs_to_jiffies(sbi->iostat_period_ms);

	for (i = 0; i < NR_IO_TYPE; i++) {
		iostat_diff[i] = sbi->rw_iostat[i] -
			sbi->prev_rw_iostat[i];
		sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
	}
	spin_unlock(&sbi->iostat_lock);

	trace_f2fs_iostat(sbi, iostat_diff);

	__record_iostat_latency(sbi);
}

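/* Clear both the cumulative byte counters and the latency accumulators. */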
void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	int i;

	spin_lock(&sbi->iostat_lock);
	for (i = 0; i < NR_IO_TYPE; i++) {
		sbi->rw_iostat[i] = 0;
		sbi->prev_rw_iostat[i] = 0;
	}
	spin_unlock(&sbi->iostat_lock);

	spin_lock_irq(&sbi->iostat_lat_lock);
	memset(io_lat, 0, sizeof(struct iostat_lat_info));
	spin_unlock_irq(&sbi->iostat_lat_lock);
}

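/*
 * Account io_bytes to one counter. Buffered totals are not tracked
 * directly: they are derived as "all app I/O minus direct I/O" whenever
 * either of those two counters moves.
 */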
void f2fs_update_iostat(struct f2fs_sb_info *sbi,
			enum iostat_type type, unsigned long long io_bytes)
{
	if (!sbi->iostat_enable)
		return;

	spin_lock(&sbi->iostat_lock);
	sbi->rw_iostat[type] += io_bytes;

	if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
		sbi->rw_iostat[APP_BUFFERED_IO] =
			sbi->rw_iostat[APP_WRITE_IO] -
			sbi->rw_iostat[APP_DIRECT_IO];

	if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
		sbi->rw_iostat[APP_BUFFERED_READ_IO] =
			sbi->rw_iostat[APP_READ_IO] -
			sbi->rw_iostat[APP_DIRECT_READ_IO];
	spin_unlock(&sbi->iostat_lock);

	f2fs_record_iostat(sbi);
}

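/*
 * Fold one completed bio into the latency table: the latency is the jiffies
 * delta since submission, bucketed by direction (read vs. sync/async write)
 * and page type, with META_FLUSH folded into META.
 */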
static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
				int rw, bool is_sync)
{
	unsigned long ts_diff;
	unsigned int iotype = iostat_ctx->type;
	unsigned long flags;
	struct f2fs_sb_info *sbi = iostat_ctx->sbi;
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	int idx;

	if (!sbi->iostat_enable)
		return;

	ts_diff = jiffies - iostat_ctx->submit_ts;
	if (iotype >= META_FLUSH)
		iotype = META;

	if (rw == 0) {
		idx = READ_IO;
	} else {
		if (is_sync)
			idx = WRITE_SYNC_IO;
		else
			idx = WRITE_ASYNC_IO;
	}

	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
	io_lat->sum_lat[idx][iotype] += ts_diff;
	io_lat->bio_cnt[idx][iotype]++;
	if (ts_diff > io_lat->peak_lat[idx][iotype])
		io_lat->peak_lat[idx][iotype] = ts_diff;
	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
}

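/*
 * Called on bio completion: restore the original ->bi_private (the
 * post-read context for reads, the sbi for writes), record the latency,
 * and return the iostat context to the mempool.
 */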
void iostat_update_and_unbind_ctx(struct bio *bio, int rw)
{
	struct bio_iostat_ctx *iostat_ctx = bio->bi_private;
	bool is_sync = bio->bi_opf & REQ_SYNC;

	if (rw == 0)
		bio->bi_private = iostat_ctx->post_read_ctx;
	else
		bio->bi_private = iostat_ctx->sbi;
	__update_iostat_latency(iostat_ctx, rw, is_sync);
	mempool_free(iostat_ctx, bio_iostat_ctx_pool);
}

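/*
 * Called when a bio is built: interpose a bio_iostat_ctx as ->bi_private so
 * completion can recover both the sbi and the original private data.
 * submit_ts and type start out zeroed and are filled in by the submission
 * path.
 */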
void iostat_alloc_and_bind_ctx(struct f2fs_sb_info *sbi,
		struct bio *bio, struct bio_post_read_ctx *ctx)
{
	struct bio_iostat_ctx *iostat_ctx;
	/* Due to the mempool, this never fails. */
	iostat_ctx = mempool_alloc(bio_iostat_ctx_pool, GFP_NOFS);
	iostat_ctx->sbi = sbi;
	iostat_ctx->submit_ts = 0;
	iostat_ctx->type = 0;
	iostat_ctx->post_read_ctx = ctx;
	bio->bi_private = iostat_ctx;
}

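/* Module-wide setup of the iostat context slab cache and its mempool. */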
int __init f2fs_init_iostat_processing(void)
{
	bio_iostat_ctx_cache =
		kmem_cache_create("f2fs_bio_iostat_ctx",
		sizeof(struct bio_iostat_ctx), 0, 0, NULL);
	if (!bio_iostat_ctx_cache)
		goto fail;
	bio_iostat_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_IOSTAT_CTXS,
			bio_iostat_ctx_cache);
	if (!bio_iostat_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_iostat_ctx_cache);
fail:
	return -ENOMEM;
}

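/* Module-wide teardown counterpart of f2fs_init_iostat_processing(). */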
void f2fs_destroy_iostat_processing(void)
{
	mempool_destroy(bio_iostat_ctx_pool);
	kmem_cache_destroy(bio_iostat_ctx_cache);
}

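/*
 * Per-superblock setup: initialize the two locks, leave iostat disabled by
 * default, and allocate the latency table fed to the latency tracepoint.
 */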
int f2fs_init_iostat(struct f2fs_sb_info *sbi)
{
	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	spin_lock_init(&sbi->iostat_lat_lock);
	sbi->iostat_enable = false;
	sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
	sbi->iostat_io_lat = f2fs_kzalloc(sbi, sizeof(struct iostat_lat_info),
					GFP_KERNEL);
	if (!sbi->iostat_io_lat)
		return -ENOMEM;

	return 0;
}

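/* Per-superblock teardown: free the latency table. */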
void f2fs_destroy_iostat(struct f2fs_sb_info *sbi)
{
	kfree(sbi->iostat_io_lat);
}