// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
#include <linux/zalloc.h>

/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_DIE: Use first CPU of die
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */
static bool have_frontend_stalled;

struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;

struct saved_value {
	struct rb_node rb_node;
	struct perf_evsel *evsel;
	enum stat_type type;
	int ctx;
	int cpu;
	struct runtime_stat *stat;
	struct stats stats;
};

static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
	struct saved_value *a = container_of(rb_node,
					     struct saved_value,
					     rb_node);
	const struct saved_value *b = entry;

	if (a->cpu != b->cpu)
		return a->cpu - b->cpu;

	/*
	 * Previously the rbtree was used to link generic metrics.
	 * The keys were evsel/cpu. Now the rbtree is extended to support
	 * per-thread shadow stats. In the shadow stats case, the keys
	 * are cpu/type/ctx/stat (evsel is NULL). In the generic metrics
	 * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
	 */
	if (a->type != b->type)
		return a->type - b->type;

	if (a->ctx != b->ctx)
		return a->ctx - b->ctx;

	if (a->evsel == NULL && b->evsel == NULL) {
		if (a->stat == b->stat)
			return 0;

		if ((char *)a->stat < (char *)b->stat)
			return -1;

		return 1;
	}

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}

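/*
 * Illustrative (hypothetical) keys for the two lookup flavours above:
 *
 *   generic metric: { .evsel = evsel, .cpu = 2 }  (type/ctx/stat zeroed)
 *   shadow stat:    { .evsel = NULL, .cpu = 2, .type = STAT_CYCLES,
 *                     .ctx = CTX_BIT_KERNEL, .stat = &rt_stat }
 *
 * saved_value_cmp() orders first on cpu, then type/ctx, then on the
 * stat or evsel pointer, so both flavours can share one rblist.
 */
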
static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
				       const void *entry)
{
	struct saved_value *nd = malloc(sizeof(struct saved_value));

	if (!nd)
		return NULL;
	memcpy(nd, entry, sizeof(struct saved_value));
	return &nd->rb_node;
}

static void saved_value_delete(struct rblist *rblist __maybe_unused,
			       struct rb_node *rb_node)
{
	struct saved_value *v;

	BUG_ON(!rb_node);
	v = container_of(rb_node, struct saved_value, rb_node);
	free(v);
}

static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
					      int cpu,
					      bool create,
					      enum stat_type type,
					      int ctx,
					      struct runtime_stat *st)
{
	struct rblist *rblist;
	struct rb_node *nd;
	struct saved_value dm = {
		.cpu = cpu,
		.evsel = evsel,
		.type = type,
		.ctx = ctx,
		.stat = st,
	};

	rblist = &st->value_list;

	nd = rblist__find(rblist, &dm);
	if (nd)
		return container_of(nd, struct saved_value, rb_node);
	if (create) {
		rblist__add_node(rblist, &dm);
		nd = rblist__find(rblist, &dm);
		if (nd)
			return container_of(nd, struct saved_value, rb_node);
	}
	return NULL;
}

void runtime_stat__init(struct runtime_stat *st)
{
	struct rblist *rblist = &st->value_list;

	rblist__init(rblist);
	rblist->node_cmp = saved_value_cmp;
	rblist->node_new = saved_value_new;
	rblist->node_delete = saved_value_delete;
}

void runtime_stat__exit(struct runtime_stat *st)
{
	rblist__exit(&st->value_list);
}

void perf_stat__init_shadow_stats(void)
{
	have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
	runtime_stat__init(&rt_stat);
}

static int evsel_context(struct perf_evsel *evsel)
{
	int ctx = 0;

	if (evsel->attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}

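/*
 * For example (hypothetical attrs): an event opened with
 * attr.exclude_kernel = 1 and attr.exclude_hv = 1 yields
 * ctx == (CTX_BIT_KERNEL | CTX_BIT_HV), so its shadow counts are
 * kept separate from those of an unrestricted event.
 */
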
static void reset_stat(struct runtime_stat *st)
{
	struct rblist *rblist;
	struct rb_node *pos, *next;

	rblist = &st->value_list;
	next = rb_first_cached(&rblist->entries);
	while (next) {
		pos = next;
		next = rb_next(pos);
		memset(&container_of(pos, struct saved_value, rb_node)->stats,
		       0,
		       sizeof(struct stats));
	}
}

void perf_stat__reset_shadow_stats(void)
{
	reset_stat(&rt_stat);
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}

void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
{
	reset_stat(st);
}

static void update_runtime_stat(struct runtime_stat *st,
				enum stat_type type,
				int ctx, int cpu, u64 count)
{
	struct saved_value *v = saved_value_lookup(NULL, cpu, true,
						   type, ctx, st);

	if (v)
		update_stats(&v->stats, count);
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
				    int cpu, struct runtime_stat *st)
{
	int ctx = evsel_context(counter);
	u64 count_ns = count;

	count *= counter->scale;

	if (perf_evsel__is_clock(counter))
		update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
		update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
				    ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
				    ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
				    ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, SMI_NUM))
		update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, APERF))
		update_runtime_stat(st, STAT_APERF, ctx, cpu, count);

	if (counter->collect_stat) {
		struct saved_value *v = saved_value_lookup(counter, cpu, true,
							   STAT_NONE, 0, st);
		update_stats(&v->stats, count);
	}
}

/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}

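/*
 * For example, a (hypothetical) frontend-stall ratio of 60.0% exceeds
 * the 50.0 threshold above, so it is printed in red; 40.0% would be
 * magenta and 20.0% yellow.
 */
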
static struct perf_evsel *perf_stat__find_event(struct perf_evlist *evsel_list,
						const char *name)
{
	struct perf_evsel *c2;

	evlist__for_each_entry (evsel_list, c2) {
		if (!strcasecmp(c2->name, name) && !c2->collect_stat)
			return c2;
	}
	return NULL;
}

/* Mark MetricExpr target events and link the events that use them to those targets. */
void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
{
	struct perf_evsel *counter, *leader, **metric_events, *oc;
	bool found;
	const char **metric_names;
	int i;
	int num_metric_names;

	evlist__for_each_entry(evsel_list, counter) {
		bool invalid = false;

		leader = counter->leader;
		if (!counter->metric_expr)
			continue;
		metric_events = counter->metric_events;
		if (!metric_events) {
			if (expr__find_other(counter->metric_expr, counter->name,
					     &metric_names, &num_metric_names) < 0)
				continue;

			metric_events = calloc(sizeof(struct perf_evsel *),
					       num_metric_names + 1);
			if (!metric_events)
				return;
			counter->metric_events = metric_events;
		}

		for (i = 0; i < num_metric_names; i++) {
			found = false;
			if (leader) {
				/* Search in group */
				for_each_group_member (oc, leader) {
					if (!strcasecmp(oc->name, metric_names[i]) &&
					    !oc->collect_stat) {
						found = true;
						break;
					}
				}
			}
			if (!found) {
				/* Search ignoring groups */
				oc = perf_stat__find_event(evsel_list, metric_names[i]);
			}
			if (!oc) {
				/* Deduping one is good enough to handle duplicated PMUs. */
				static char *printed;

				/*
				 * Adding events automatically would be difficult, because
				 * it would risk creating groups that are not schedulable.
				 * perf stat doesn't understand all the scheduling constraints
				 * of events. So we ask the user instead to add the missing
				 * events.
				 */
				if (!printed || strcasecmp(printed, metric_names[i])) {
					fprintf(stderr,
						"Add %s event to groups to get metric expression for %s\n",
						metric_names[i],
						counter->name);
					printed = strdup(metric_names[i]);
				}
				invalid = true;
				continue;
			}
			metric_events[i] = oc;
			oc->collect_stat = true;
		}
		metric_events[i] = NULL;
		free(metric_names);
		if (invalid) {
			free(metric_events);
			counter->metric_events = NULL;
			counter->metric_expr = NULL;
		}
	}
}

static double runtime_stat_avg(struct runtime_stat *st,
			       enum stat_type type, int ctx, int cpu)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
	if (!v)
		return 0.0;

	return avg_stats(&v->stats);
}

static double runtime_stat_n(struct runtime_stat *st,
			     enum stat_type type, int ctx, int cpu)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
	if (!v)
		return 0.0;

	return v->stats.n;
}

static void print_stalled_cycles_frontend(struct perf_stat_config *config,
					  int cpu,
					  struct perf_evsel *evsel, double avg,
					  struct perf_stat_output_ctx *out,
					  struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	if (ratio)
		out->print_metric(config, out->ctx, color, "%7.2f%%", "frontend cycles idle",
				  ratio);
	else
		out->print_metric(config, out->ctx, NULL, NULL, "frontend cycles idle", 0);
}

static void print_stalled_cycles_backend(struct perf_stat_config *config,
					 int cpu,
					 struct perf_evsel *evsel, double avg,
					 struct perf_stat_output_ctx *out,
					 struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}

static void print_branch_misses(struct perf_stat_config *config,
				int cpu,
				struct perf_evsel *evsel,
				double avg,
				struct perf_stat_output_ctx *out,
				struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all branches", ratio);
}

static void print_l1_dcache_misses(struct perf_stat_config *config,
				   int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
}

static void print_l1_icache_misses(struct perf_stat_config *config,
				   int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
}

static void print_dtlb_cache_misses(struct perf_stat_config *config,
				    int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
}

static void print_itlb_cache_misses(struct perf_stat_config *config,
				    int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
}

static void print_ll_cache_misses(struct perf_stat_config *config,
				  int cpu,
				  struct perf_evsel *evsel,
				  double avg,
				  struct perf_stat_output_ctx *out,
				  struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
}

/*
 * High level "TopDown" CPU core pipeline bottleneck breakdown.
 *
 * Basic concept following
 * Yasin, A Top Down Method for Performance analysis and Counter architecture
 * ISPASS14
 *
 * The CPU pipeline is divided into 4 areas that can be bottlenecks:
 *
 * Frontend -> Backend -> Retiring
 * BadSpeculation in addition means out of order execution that is thrown away
 * (for example on branch mispredictions).
 * Frontend is instruction decoding.
 * Backend is execution, like computation and accessing data in memory.
 * Retiring is good execution that is not directly bottlenecked.
 *
 * The formulas are computed in slots.
 * A slot is an entry in the pipeline for each unit of the pipeline width
 * (for example a 4-wide pipeline has 4 slots for each cycle).
 *
 * Formulas:
 * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
 *			TotalSlots
 * Retiring = SlotsRetired / TotalSlots
 * FrontendBound = FetchBubbles / TotalSlots
 * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
 *
 * The kernel provides the mapping to the low level CPU events and any scaling
 * needed for the CPU pipeline width, for example:
 *
 * TotalSlots = Cycles * 4
 *
 * The scaling factor is communicated in the sysfs unit.
 *
 * In some cases the CPU may not be able to measure all the formulas due to
 * missing events. In this case multiple formulas are combined, where possible.
 *
 * Full TopDown supports more levels to sub-divide each area: for example
 * BackendBound into computing bound and memory bound. For now we only
 * support Level 1 TopDown.
 */

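/*
 * A worked example with hypothetical counts, assuming a 4-wide pipeline:
 * Cycles = 1000, so TotalSlots = 4000; SlotsIssued = 2000,
 * SlotsRetired = 1600, RecoveryBubbles = 200, FetchBubbles = 800.
 *
 * BadSpeculation = ((2000 - 1600) + 200) / 4000 = 0.15
 * Retiring       = 1600 / 4000                  = 0.40
 * FrontendBound  =  800 / 4000                  = 0.20
 * BackendBound   = 1.0 - 0.15 - 0.40 - 0.20     = 0.25
 */
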
/* Clamp tiny negative results (e.g. from counter rounding error) to zero. */
static double sanitize_val(double x)
{
	if (x < 0 && x >= -0.02)
		return 0.0;
	return x;
}

static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
{
	return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
}

static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
{
	double bad_spec = 0;
	double total_slots;
	double total;

	total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
		runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
		runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);

	total_slots = td_total_slots(ctx, cpu, st);
	if (total_slots)
		bad_spec = total / total_slots;
	return sanitize_val(bad_spec);
}

static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
{
	double retiring = 0;
	double total_slots = td_total_slots(ctx, cpu, st);
	double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
					    ctx, cpu);

	if (total_slots)
		retiring = ret_slots / total_slots;
	return retiring;
}

static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
{
	double fe_bound = 0;
	double total_slots = td_total_slots(ctx, cpu, st);
	double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
					    ctx, cpu);

	if (total_slots)
		fe_bound = fetch_bub / total_slots;
	return fe_bound;
}

static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
{
	double sum = (td_fe_bound(ctx, cpu, st) +
		      td_bad_spec(ctx, cpu, st) +
		      td_retiring(ctx, cpu, st));
	if (sum == 0)
		return 0;
	return sanitize_val(1.0 - sum);
}

static void print_smi_cost(struct perf_stat_config *config,
			   int cpu, struct perf_evsel *evsel,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st)
{
	double smi_num, aperf, cycles, cost = 0.0;
	int ctx = evsel_context(evsel);
	const char *color = NULL;

	smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
	aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
	cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if ((cycles == 0) || (aperf == 0))
		return;

	if (smi_num)
		cost = (aperf - cycles) / aperf * 100.00;

	if (cost > 10)
		color = PERF_COLOR_RED;
	out->print_metric(config, out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
	out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}

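/*
 * For example (hypothetical averages): aperf = 1000 and cycles = 900
 * with smi_num != 0 gives cost = (1000 - 900) / 1000 * 100 = 10.0%,
 * i.e. roughly 10% of the measured cycles are attributed to SMI
 * handling, which exceeds the threshold above and is printed in red.
 */
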
static void generic_metric(struct perf_stat_config *config,
			   const char *metric_expr,
			   struct perf_evsel **metric_events,
			   char *name,
			   const char *metric_name,
			   double avg,
			   int cpu,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st)
{
	print_metric_t print_metric = out->print_metric;
	struct parse_ctx pctx;
	double ratio;
	int i;
	void *ctxp = out->ctx;
	char *n, *pn;

	expr__ctx_init(&pctx);
	expr__add_id(&pctx, name, avg);
	for (i = 0; metric_events[i]; i++) {
		struct saved_value *v;
		struct stats *stats;
		double scale;

		if (!strcmp(metric_events[i]->name, "duration_time")) {
			stats = &walltime_nsecs_stats;
			scale = 1e-9;
		} else {
			v = saved_value_lookup(metric_events[i], cpu, false,
					       STAT_NONE, 0, st);
			if (!v)
				break;
			stats = &v->stats;
			scale = 1.0;
		}

		n = strdup(metric_events[i]->name);
		if (!n)
			return;
		/*
		 * This display code with --no-merge adds [cpu] postfixes.
		 * These are not supported by the parser. Remove everything
		 * after the space.
		 */
		pn = strchr(n, ' ');
		if (pn)
			*pn = 0;
		expr__add_id(&pctx, n, avg_stats(stats)*scale);
	}
	if (!metric_events[i]) {
		const char *p = metric_expr;

		if (expr__parse(&ratio, &pctx, &p) == 0)
			print_metric(config, ctxp, NULL, "%8.1f",
				     metric_name ?
				     metric_name :
				     out->force_header ? name : "",
				     ratio);
		else
			print_metric(config, ctxp, NULL, NULL,
				     out->force_header ?
				     (metric_name ? metric_name : name) : "", 0);
	} else
		print_metric(config, ctxp, NULL, NULL, "", 0);

	for (i = 1; i < pctx.num_ids; i++)
		zfree(&pctx.ids[i].name);
}

void perf_stat__print_shadow_stats(struct perf_stat_config *config,
				   struct perf_evsel *evsel,
				   double avg, int cpu,
				   struct perf_stat_output_ctx *out,
				   struct rblist *metric_events,
				   struct runtime_stat *st)
{
	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	double total, ratio = 0.0, total2;
	const char *color = NULL;
	int ctx = evsel_context(evsel);
	struct metric_event *me;
	int num = 1;

	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

		if (total) {
			ratio = avg / total;
			print_metric(config, ctxp, NULL, "%7.2f ",
				     "insn per cycle", ratio);
		} else {
			print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
		}

		total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT,
					 ctx, cpu);

		total = max(total, runtime_stat_avg(st,
						    STAT_STALLED_CYCLES_BACK,
						    ctx, cpu));

		if (total && avg) {
			out->new_line(config, ctxp);
			ratio = total / avg;
			print_metric(config, ctxp, NULL, "%7.2f ",
				     "stalled cycles per insn",
				     ratio);
		} else if (have_frontend_stalled) {
			print_metric(config, ctxp, NULL, NULL,
				     "stalled cycles per insn", 0);
		}
	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
		if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
			print_branch_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
			print_l1_dcache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all L1-dcache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
			print_l1_icache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all L1-icache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
			print_dtlb_cache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all dTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
			print_itlb_cache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all iTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
			print_ll_cache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all LL-cache hits", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
		total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);

		if (total)
			ratio = avg * 100 / total;

		if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
			print_metric(config, ctxp, NULL, "%8.3f %%",
				     "of all cache refs", ratio);
		else
			print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(config, cpu, evsel, avg, out, st);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(config, cpu, evsel, avg, out, st);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);

		if (total) {
			ratio = avg / total;
			print_metric(config, ctxp, NULL, "%8.3f", "GHz", ratio);
		} else {
			print_metric(config, ctxp, NULL, NULL, "GHz", 0);
		}
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

		if (total)
			print_metric(config, ctxp, NULL,
				     "%7.2f%%", "transactional cycles",
				     100.0 * (avg / total));
		else
			print_metric(config, ctxp, NULL, NULL, "transactional cycles",
				     0);
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
		total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);

		if (total2 < avg)
			total2 = avg;
		if (total)
			print_metric(config, ctxp, NULL, "%7.2f%%", "aborted cycles",
				100.0 * ((total2-avg) / total));
		else
			print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
					 ctx, cpu);

		if (avg)
			ratio = total / avg;

		if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
			print_metric(config, ctxp, NULL, "%8.0f",
				     "cycles / transaction", ratio);
		else
			print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
				     0);
	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
					 ctx, cpu);

		if (avg)
			ratio = total / avg;

		print_metric(config, ctxp, NULL, "%8.0f", "cycles / elision", ratio);
	} else if (perf_evsel__is_clock(evsel)) {
		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
			print_metric(config, ctxp, NULL, "%8.3f", "CPUs utilized",
				     avg / (ratio * evsel->scale));
		else
			print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
		double fe_bound = td_fe_bound(ctx, cpu, st);

		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
			     fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
		double retiring = td_retiring(ctx, cpu, st);

		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(config, ctxp, color, "%8.1f%%", "retiring",
			     retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
		double bad_spec = td_bad_spec(ctx, cpu, st);

		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
			     bad_spec * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
		double be_bound = td_be_bound(ctx, cpu, st);
		const char *name = "backend bound";
		static int have_recovery_bubbles = -1;

		/* In case the CPU does not support topdown-recovery-bubbles */
		if (have_recovery_bubbles < 0)
			have_recovery_bubbles = pmu_have_event("cpu",
					"topdown-recovery-bubbles");
		if (!have_recovery_bubbles)
			name = "backend bound/bad spec";

		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		if (td_total_slots(ctx, cpu, st) > 0)
			print_metric(config, ctxp, color, "%8.1f%%", name,
					be_bound * 100.);
		else
			print_metric(config, ctxp, NULL, NULL, name, 0);
	} else if (evsel->metric_expr) {
		generic_metric(config, evsel->metric_expr, evsel->metric_events, evsel->name,
			       evsel->metric_name, avg, cpu, out, st);
	} else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
		char unit = 'M';
		char unit_buf[10];

		total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}
		snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
		print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
	} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
		print_smi_cost(config, cpu, evsel, out, st);
	} else {
		num = 0;
	}

	if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
		struct metric_expr *mexp;

		list_for_each_entry (mexp, &me->head, nd) {
			if (num++ > 0)
				out->new_line(config, ctxp);
			generic_metric(config, mexp->metric_expr, mexp->metric_events,
				       evsel->name, mexp->metric_name,
				       avg, cpu, out, st);
		}
	}
	if (num == 0)
		print_metric(config, ctxp, NULL, NULL, NULL, 0);
}