// SPDX-License-Identifier: GPL-2.0
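/*
 * Helpers for basic-block ("block") reporting, e.g. 'perf report
 * --total-cycles': a refcounted struct block_info per block, hist entries
 * built from the per-symbol cycles histograms, and the column formats
 * used to print them.
 */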
#include <stdlib.h>
#include <string.h>
#include <linux/zalloc.h>
#include "block-info.h"
#include "sort.h"
#include "annotate.h"
#include "symbol.h"
#include "dso.h"
#include "map.h"
#include "srcline.h"
#include "evlist.h"

static struct block_header_column {
	const char *name;
	int width;
} block_columns[PERF_HPP_REPORT__BLOCK_MAX_INDEX] = {
	[PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT] = {
		.name = "Sampled Cycles%",
		.width = 15,
	},
	[PERF_HPP_REPORT__BLOCK_LBR_CYCLES] = {
		.name = "Sampled Cycles",
		.width = 14,
	},
	[PERF_HPP_REPORT__BLOCK_CYCLES_PCT] = {
		.name = "Avg Cycles%",
		.width = 11,
	},
	[PERF_HPP_REPORT__BLOCK_AVG_CYCLES] = {
		.name = "Avg Cycles",
		.width = 10,
	},
	[PERF_HPP_REPORT__BLOCK_RANGE] = {
		.name = "[Program Block Range]",
		.width = 70,
	},
	[PERF_HPP_REPORT__BLOCK_DSO] = {
		.name = "Shared Object",
		.width = 20,
	}
};

struct block_info *block_info__get(struct block_info *bi)
{
	if (bi)
		refcount_inc(&bi->refcnt);
	return bi;
}

void block_info__put(struct block_info *bi)
{
	if (bi && refcount_dec_and_test(&bi->refcnt))
		free(bi);
}

struct block_info *block_info__new(void)
{
	struct block_info *bi = zalloc(sizeof(*bi));

	if (bi)
		refcount_set(&bi->refcnt, 1);
	return bi;
}
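
/*
 * Reference counting sketch (illustrative only; the real callers live
 * outside this file):
 *
 *	bi = block_info__new();		refcnt == 1
 *	block_info__get(bi);		refcnt == 2
 *	block_info__put(bi);		refcnt == 1
 *	block_info__put(bi);		refcnt == 0, bi is freed
 */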

int64_t block_info__cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			struct hist_entry *left, struct hist_entry *right)
{
	struct block_info *bi_l = left->block_info;
	struct block_info *bi_r = right->block_info;
	int cmp;

	if (!bi_l->sym || !bi_r->sym) {
		if (!bi_l->sym && !bi_r->sym)
			return 0;
		else if (!bi_l->sym)
			return -1;
		else
			return 1;
	}

	if (bi_l->sym == bi_r->sym) {
		if (bi_l->start == bi_r->start) {
			if (bi_l->end == bi_r->end)
				return 0;
			else
				return (int64_t)(bi_r->end - bi_l->end);
		} else
			return (int64_t)(bi_r->start - bi_l->start);
	} else {
		cmp = strcmp(bi_l->sym->name, bi_r->sym->name);
		return cmp;
	}

	if (bi_l->sym->start != bi_r->sym->start)
		return (int64_t)(bi_r->sym->start - bi_l->sym->start);

	return (int64_t)(bi_r->sym->end - bi_l->sym->end);
}

static void init_block_info(struct block_info *bi, struct symbol *sym,
			    struct cyc_hist *ch, int offset,
			    u64 total_cycles)
{
	bi->sym = sym;
	bi->start = ch->start;
	bi->end = offset;
	bi->cycles = ch->cycles;
	bi->cycles_aggr = ch->cycles_aggr;
	bi->num = ch->num;
	bi->num_aggr = ch->num_aggr;
	bi->total_cycles = total_cycles;

	memcpy(bi->cycles_spark, ch->cycles_spark,
	       NUM_SPARKS * sizeof(u64));
}
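
/*
 * Walk the symbol's cycles histogram and add one block hist entry per
 * block that has aggregated samples; the per-block average cycles are
 * accumulated into *block_cycles_aggr when it is non-NULL.
 */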
int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
			    u64 *block_cycles_aggr, u64 total_cycles)
{
	struct annotation *notes;
	struct cyc_hist *ch;
	static struct addr_location al;
	u64 cycles = 0;

	if (!he->ms.map || !he->ms.sym)
		return 0;

	memset(&al, 0, sizeof(al));
	al.map = he->ms.map;
	al.sym = he->ms.sym;

	notes = symbol__annotation(he->ms.sym);
	if (!notes || !notes->src || !notes->src->cycles_hist)
		return 0;
	ch = notes->src->cycles_hist;
	for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
		if (ch[i].num_aggr) {
			struct block_info *bi;
			struct hist_entry *he_block;

			bi = block_info__new();
			if (!bi)
				return -1;

			init_block_info(bi, he->ms.sym, &ch[i], i,
					total_cycles);
			cycles += bi->cycles_aggr / bi->num_aggr;

			he_block = hists__add_entry_block(&bh->block_hists,
							  &al, bi);
			if (!he_block) {
				block_info__put(bi);
				return -1;
			}
		}
	}

	if (block_cycles_aggr)
		*block_cycles_aggr += cycles;

	return 0;
}

static int block_column_header(struct perf_hpp_fmt *fmt,
			       struct perf_hpp *hpp,
			       struct hists *hists __maybe_unused,
			       int line __maybe_unused,
			       int *span __maybe_unused)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
			 block_fmt->header);
}

static int block_column_width(struct perf_hpp_fmt *fmt,
			      struct perf_hpp *hpp __maybe_unused,
			      struct hists *hists __maybe_unused)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);

	return block_fmt->width;
}

static int block_total_cycles_pct_entry(struct perf_hpp_fmt *fmt,
					struct perf_hpp *hpp,
					struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi = he->block_info;
	double ratio = 0.0;
	char buf[16];

	if (block_fmt->total_cycles)
		ratio = (double)bi->cycles / (double)block_fmt->total_cycles;

	sprintf(buf, "%.2f%%", 100.0 * ratio);

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
}
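
/*
 * Sort on the percentage scaled to an integer with 0.001% resolution
 * (ratio * 100000), so tiny differences in percentage compare equal.
 */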
static int64_t block_total_cycles_pct_sort(struct perf_hpp_fmt *fmt,
					   struct hist_entry *left,
					   struct hist_entry *right)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi_l = left->block_info;
	struct block_info *bi_r = right->block_info;
	double l, r;

	if (block_fmt->total_cycles) {
		l = ((double)bi_l->cycles /
		     (double)block_fmt->total_cycles) * 100000.0;
		r = ((double)bi_r->cycles /
		     (double)block_fmt->total_cycles) * 100000.0;
		return (int64_t)l - (int64_t)r;
	}

	return 0;
}
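
/*
 * Render a cycle count in a human readable form, e.g. 1234 becomes
 * "1.2K" and 2500000 becomes "2.5M".
 */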
static void cycles_string(u64 cycles, char *buf, int size)
{
	if (cycles >= 1000000)
		scnprintf(buf, size, "%.1fM", (double)cycles / 1000000.0);
	else if (cycles >= 1000)
		scnprintf(buf, size, "%.1fK", (double)cycles / 1000.0);
	else
		scnprintf(buf, size, "%1d", cycles);
}

static int block_cycles_lbr_entry(struct perf_hpp_fmt *fmt,
				  struct perf_hpp *hpp, struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi = he->block_info;
	char cycles_buf[16];

	cycles_string(bi->cycles_aggr, cycles_buf, sizeof(cycles_buf));

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
			 cycles_buf);
}

static int block_cycles_pct_entry(struct perf_hpp_fmt *fmt,
				  struct perf_hpp *hpp, struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi = he->block_info;
	double ratio = 0.0;
	u64 avg;
	char buf[16];

	if (block_fmt->block_cycles && bi->num_aggr) {
		avg = bi->cycles_aggr / bi->num_aggr;
		ratio = (double)avg / (double)block_fmt->block_cycles;
	}

	sprintf(buf, "%.2f%%", 100.0 * ratio);

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
}

static int block_avg_cycles_entry(struct perf_hpp_fmt *fmt,
				  struct perf_hpp *hpp,
				  struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi = he->block_info;
	char cycles_buf[16];

	cycles_string(bi->cycles_aggr / bi->num_aggr, cycles_buf,
		      sizeof(cycles_buf));

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
			 cycles_buf);
}
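
/*
 * Print the block as a [start_line -> end_line] source range when the
 * srcline lookup succeeds for both ends, otherwise fall back to the raw
 * offsets within the symbol.
 */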
static int block_range_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi = he->block_info;
	char buf[128];
	char *start_line, *end_line;

	symbol_conf.disable_add2line_warn = true;

	start_line = map__srcline(he->ms.map, bi->sym->start + bi->start,
				  he->ms.sym);

	end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
				he->ms.sym);

	if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
		scnprintf(buf, sizeof(buf), "[%s -> %s]",
			  start_line, end_line);
	} else {
		scnprintf(buf, sizeof(buf), "[%7lx -> %7lx]",
			  bi->start, bi->end);
	}

	free_srcline(start_line);
	free_srcline(end_line);

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
}

static int block_dso_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			   struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct map *map = he->ms.map;

	if (map && map->dso) {
		return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
				 map->dso->short_name);
	}

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
			 "[unknown]");
}

static void init_block_header(struct block_fmt *block_fmt)
{
	struct perf_hpp_fmt *fmt = &block_fmt->fmt;

	BUG_ON(block_fmt->idx >= PERF_HPP_REPORT__BLOCK_MAX_INDEX);

	block_fmt->header = block_columns[block_fmt->idx].name;
	block_fmt->width = block_columns[block_fmt->idx].width;

	fmt->header = block_column_header;
	fmt->width = block_column_width;
}
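
/*
 * Hook up the output callback for one column and register it with the
 * hpp list.  Only the "Sampled Cycles%" column gets cmp/sort callbacks;
 * it is also the column registered as the sort field in init_block_hist().
 */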
static void hpp_register(struct block_fmt *block_fmt, int idx,
			 struct perf_hpp_list *hpp_list)
{
	struct perf_hpp_fmt *fmt = &block_fmt->fmt;

	block_fmt->idx = idx;
	INIT_LIST_HEAD(&fmt->list);
	INIT_LIST_HEAD(&fmt->sort_list);

	switch (idx) {
	case PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT:
		fmt->entry = block_total_cycles_pct_entry;
		fmt->cmp = block_info__cmp;
		fmt->sort = block_total_cycles_pct_sort;
		break;
	case PERF_HPP_REPORT__BLOCK_LBR_CYCLES:
		fmt->entry = block_cycles_lbr_entry;
		break;
	case PERF_HPP_REPORT__BLOCK_CYCLES_PCT:
		fmt->entry = block_cycles_pct_entry;
		break;
	case PERF_HPP_REPORT__BLOCK_AVG_CYCLES:
		fmt->entry = block_avg_cycles_entry;
		break;
	case PERF_HPP_REPORT__BLOCK_RANGE:
		fmt->entry = block_range_entry;
		break;
	case PERF_HPP_REPORT__BLOCK_DSO:
		fmt->entry = block_dso_entry;
		break;
	default:
		return;
	}

	init_block_header(block_fmt);
	perf_hpp_list__column_register(hpp_list, fmt);
}

static void register_block_columns(struct perf_hpp_list *hpp_list,
				   struct block_fmt *block_fmts)
{
	for (int i = 0; i < PERF_HPP_REPORT__BLOCK_MAX_INDEX; i++)
		hpp_register(&block_fmts[i], i, hpp_list);
}

static void init_block_hist(struct block_hist *bh, struct block_fmt *block_fmts)
{
	__hists__init(&bh->block_hists, &bh->block_list);
	perf_hpp_list__init(&bh->block_list);
	bh->block_list.nr_header_lines = 1;

	register_block_columns(&bh->block_list, block_fmts);

	perf_hpp_list__register_sort_field(&bh->block_list,
		&block_fmts[PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT].fmt);
}

static void process_block_report(struct hists *hists,
				 struct block_report *block_report,
				 u64 total_cycles)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct block_hist *bh = &block_report->hist;
	struct hist_entry *he;

	init_block_hist(bh, block_report->fmts);

	while (next) {
		he = rb_entry(next, struct hist_entry, rb_node);
		block_info__process_sym(he, bh, &block_report->cycles,
					total_cycles);
		next = rb_next(&he->rb_node);
	}

	for (int i = 0; i < PERF_HPP_REPORT__BLOCK_MAX_INDEX; i++) {
		block_report->fmts[i].total_cycles = total_cycles;
		block_report->fmts[i].block_cycles = block_report->cycles;
	}

	hists__output_resort(&bh->block_hists, NULL);
}
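
/*
 * Build one block report per event in the evlist; the returned array is
 * calloc()'d with evlist->core.nr_entries elements, one per evsel, each
 * carrying its own block_hists.  Illustrative use (the caller and its
 * variable names here are hypothetical):
 *
 *	struct block_report *reps;
 *
 *	reps = block_info__create_report(session->evlist, total_cycles);
 *	if (reps == NULL)
 *		return -ENOMEM;
 */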
struct block_report *block_info__create_report(struct evlist *evlist,
					       u64 total_cycles)
{
	struct block_report *block_reports;
	int nr_hists = evlist->core.nr_entries, i = 0;
	struct evsel *pos;

	block_reports = calloc(nr_hists, sizeof(struct block_report));
	if (!block_reports)
		return NULL;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		process_block_report(hists, &block_reports[i], total_cycles);
		i++;
	}

	return block_reports;
}