blob: b89160718c04bae7ad85ace9f32baf0f16e60904 [file] [log] [blame]
Thomas Gleixner2025cf92019-05-29 07:18:02 -07001// SPDX-License-Identifier: GPL-2.0-only
Andi Kleenb18f3e32017-08-31 12:40:31 -07002/*
3 * Copyright (c) 2017, Intel Corporation.
Andi Kleenb18f3e32017-08-31 12:40:31 -07004 */
5
6/* Manage metrics and groups of metrics from JSON files */
7
8#include "metricgroup.h"
Arnaldo Carvalho de Melob4209022019-08-29 15:56:40 -03009#include "debug.h"
Andi Kleenb18f3e32017-08-31 12:40:31 -070010#include "evlist.h"
Arnaldo Carvalho de Melo0b8026e2019-08-21 10:54:14 -030011#include "evsel.h"
Andi Kleenb18f3e32017-08-31 12:40:31 -070012#include "strbuf.h"
13#include "pmu.h"
14#include "expr.h"
15#include "rblist.h"
Andi Kleenb18f3e32017-08-31 12:40:31 -070016#include <string.h>
Andi Kleenb18f3e32017-08-31 12:40:31 -070017#include <errno.h>
Andi Kleenb18f3e32017-08-31 12:40:31 -070018#include "strlist.h"
19#include <assert.h>
Arnaldo Carvalho de Melobd9860b2019-06-25 21:13:51 -030020#include <linux/ctype.h>
Arnaldo Carvalho de Melob4209022019-08-29 15:56:40 -030021#include <linux/string.h>
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -030022#include <linux/zalloc.h>
Arnaldo Carvalho de Melo0b8026e2019-08-21 10:54:14 -030023#include <subcmd/parse-options.h>
Kan Liangab483d82020-02-24 13:59:23 -080024#include <api/fs/fs.h>
25#include "util.h"
Jiri Olsaf6fb0962020-07-19 20:13:16 +020026#include <asm/bug.h>
Namhyung Kimb214ba82020-09-24 21:44:53 +090027#include "cgroup.h"
Andi Kleenb18f3e32017-08-31 12:40:31 -070028
/*
 * Look up the metric_event keyed by @evsel in @metric_events.  When
 * @create is true and no node exists yet, insert one and return it.
 * Returns NULL when @metric_events is NULL or lookup/insertion fails.
 */
struct metric_event *metricgroup__lookup(struct rblist *metric_events,
					 struct evsel *evsel,
					 bool create)
{
	struct rb_node *nd;
	struct metric_event me = {
		.evsel = evsel
	};

	if (!metric_events)
		return NULL;

	nd = rblist__find(metric_events, &me);
	if (nd)
		return container_of(nd, struct metric_event, nd);
	if (create) {
		rblist__add_node(metric_events, &me);
		/* The rblist allocates its own copy; find it to return it. */
		nd = rblist__find(metric_events, &me);
		if (nd)
			return container_of(nd, struct metric_event, nd);
	}
	return NULL;
}
52
/*
 * rblist comparator: orders metric_event nodes by the address of their
 * evsel pointer (identity comparison, not event content).
 */
static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
{
	struct metric_event *a = container_of(rb_node,
					      struct metric_event,
					      nd);
	const struct metric_event *b = entry;

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}
66
/*
 * rblist constructor: allocate a metric_event as a copy of the lookup
 * key @entry and initialize its (empty) list of metric expressions.
 */
static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
					const void *entry)
{
	struct metric_event *me = malloc(sizeof(struct metric_event));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct metric_event));
	/* NOTE(review): redundant — the memcpy above already copied evsel. */
	me->evsel = ((struct metric_event *)entry)->evsel;
	INIT_LIST_HEAD(&me->head);
	return &me->nd;
}
79
Jiri Olsa9afe5652020-06-02 23:47:38 +020080static void metric_event_delete(struct rblist *rblist __maybe_unused,
81 struct rb_node *rb_node)
82{
83 struct metric_event *me = container_of(rb_node, struct metric_event, nd);
84 struct metric_expr *expr, *tmp;
85
86 list_for_each_entry_safe(expr, tmp, &me->head, nd) {
Jiri Olsa4ea28962020-07-19 20:13:10 +020087 free(expr->metric_refs);
Namhyung Kimb033ab12020-09-15 12:18:10 +090088 free(expr->metric_events);
Jiri Olsa9afe5652020-06-02 23:47:38 +020089 free(expr);
90 }
91
92 free(me);
93}
94
Andi Kleenb18f3e32017-08-31 12:40:31 -070095static void metricgroup__rblist_init(struct rblist *metric_events)
96{
97 rblist__init(metric_events);
98 metric_events->node_cmp = metric_event_cmp;
99 metric_events->node_new = metric_event_new;
Jiri Olsa9afe5652020-06-02 23:47:38 +0200100 metric_events->node_delete = metric_event_delete;
101}
102
/* Release every node of @metric_events (runs metric_event_delete on each). */
void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}
107
Jiri Olsa83de0b72020-07-19 20:13:09 +0200108/*
109 * A node in the list of referenced metrics. metric_expr
110 * is held as a convenience to avoid a search through the
111 * metric list.
112 */
113struct metric_ref_node {
114 const char *metric_name;
115 const char *metric_expr;
116 struct list_head list;
117};
118
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200119struct metric {
Andi Kleenb18f3e32017-08-31 12:40:31 -0700120 struct list_head nd;
Ian Rogersded80bd2020-05-15 15:17:32 -0700121 struct expr_parse_ctx pctx;
Andi Kleenb18f3e32017-08-31 12:40:31 -0700122 const char *metric_name;
123 const char *metric_expr;
Jin Yao287f2642019-08-28 13:59:31 +0800124 const char *metric_unit;
Jiri Olsa83de0b72020-07-19 20:13:09 +0200125 struct list_head metric_refs;
126 int metric_refs_cnt;
Kajol Jain1e1a8732020-04-02 02:03:37 +0530127 int runtime;
Ian Rogers7f9eca52020-05-20 11:20:07 -0700128 bool has_constraint;
Andi Kleenb18f3e32017-08-31 12:40:31 -0700129};
130
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200131#define RECURSION_ID_MAX 1000
132
133struct expr_ids {
134 struct expr_id id[RECURSION_ID_MAX];
135 int cnt;
136};
137
/* Hand out the next free expr_id slot, or NULL when the pool is exhausted. */
static struct expr_id *expr_ids__alloc(struct expr_ids *ids)
{
	if (ids->cnt >= RECURSION_ID_MAX)
		return NULL;
	return &ids->id[ids->cnt++];
}
144
/* Free the strdup'd id strings; the slots themselves live in the pool. */
static void expr_ids__exit(struct expr_ids *ids)
{
	int i;

	for (i = 0; i < ids->cnt; i++)
		free(ids->id[i].id);
}
152
Ian Rogersdcc81be2020-09-17 13:18:07 -0700153static bool contains_event(struct evsel **metric_events, int num_events,
154 const char *event_name)
155{
156 int i;
157
158 for (i = 0; i < num_events; i++) {
159 if (!strcmp(metric_events[i]->name, event_name))
160 return true;
161 }
162 return false;
163}
164
Ian Rogers24406892020-05-20 11:20:09 -0700165/**
Ian Rogers9e34c1c2020-09-09 20:26:31 -0700166 * Find a group of events in perf_evlist that correspond to those from a parsed
Ian Rogers05530a72020-05-20 11:20:10 -0700167 * metric expression. Note, as find_evsel_group is called in the same order as
168 * perf_evlist was constructed, metric_no_merge doesn't need to test for
169 * underfilling a group.
Ian Rogers24406892020-05-20 11:20:09 -0700170 * @perf_evlist: a list of events something like: {metric1 leader, metric1
171 * sibling, metric1 sibling}:W,duration_time,{metric2 leader, metric2 sibling,
172 * metric2 sibling}:W,duration_time
173 * @pctx: the parse context for the metric expression.
Ian Rogers05530a72020-05-20 11:20:10 -0700174 * @metric_no_merge: don't attempt to share events for the metric with other
175 * metrics.
Ian Rogers24406892020-05-20 11:20:09 -0700176 * @has_constraint: is there a contraint on the group of events? In which case
177 * the events won't be grouped.
178 * @metric_events: out argument, null terminated array of evsel's associated
179 * with the metric.
180 * @evlist_used: in/out argument, bitmap tracking which evlist events are used.
181 * @return the first metric event or NULL on failure.
182 */
Jiri Olsa63503db2019-07-21 13:23:52 +0200183static struct evsel *find_evsel_group(struct evlist *perf_evlist,
Ian Rogersded80bd2020-05-15 15:17:32 -0700184 struct expr_parse_ctx *pctx,
Ian Rogers05530a72020-05-20 11:20:10 -0700185 bool metric_no_merge,
Ian Rogers24406892020-05-20 11:20:09 -0700186 bool has_constraint,
Kajol Jain58fc90f2020-02-21 15:41:21 +0530187 struct evsel **metric_events,
Ian Rogers45db55f2020-05-20 00:28:08 -0700188 unsigned long *evlist_used)
Andi Kleenb18f3e32017-08-31 12:40:31 -0700189{
Ian Rogers24406892020-05-20 11:20:09 -0700190 struct evsel *ev, *current_leader = NULL;
Jiri Olsa070b3b52020-07-12 15:26:18 +0200191 struct expr_id_data *val_ptr;
Ian Rogers24406892020-05-20 11:20:09 -0700192 int i = 0, matched_events = 0, events_to_match;
193 const int idnum = (int)hashmap__size(&pctx->ids);
194
Ian Rogersdcc81be2020-09-17 13:18:07 -0700195 /*
196 * duration_time is always grouped separately, when events are grouped
197 * (ie has_constraint is false) then ignore it in the matching loop and
198 * add it to metric_events at the end.
199 */
Ian Rogers24406892020-05-20 11:20:09 -0700200 if (!has_constraint &&
201 hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
202 events_to_match = idnum - 1;
203 else
204 events_to_match = idnum;
Andi Kleenb18f3e32017-08-31 12:40:31 -0700205
206 evlist__for_each_entry (perf_evlist, ev) {
Ian Rogers24406892020-05-20 11:20:09 -0700207 /*
208 * Events with a constraint aren't grouped and match the first
209 * events available.
210 */
211 if (has_constraint && ev->weak_group)
Kajol Jain58fc90f2020-02-21 15:41:21 +0530212 continue;
Ian Rogers05530a72020-05-20 11:20:10 -0700213 /* Ignore event if already used and merging is disabled. */
214 if (metric_no_merge && test_bit(ev->idx, evlist_used))
215 continue;
Ian Rogers24406892020-05-20 11:20:09 -0700216 if (!has_constraint && ev->leader != current_leader) {
217 /*
218 * Start of a new group, discard the whole match and
219 * start again.
220 */
221 matched_events = 0;
Kajol Jain58fc90f2020-02-21 15:41:21 +0530222 memset(metric_events, 0,
223 sizeof(struct evsel *) * idnum);
Ian Rogers24406892020-05-20 11:20:09 -0700224 current_leader = ev->leader;
225 }
Ian Rogersdcc81be2020-09-17 13:18:07 -0700226 /*
227 * Check for duplicate events with the same name. For example,
228 * uncore_imc/cas_count_read/ will turn into 6 events per socket
229 * on skylakex. Only the first such event is placed in
230 * metric_events. If events aren't grouped then this also
231 * ensures that the same event in different sibling groups
232 * aren't both added to metric_events.
233 */
234 if (contains_event(metric_events, matched_events, ev->name))
235 continue;
236 /* Does this event belong to the parse context? */
237 if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr))
Ian Rogers24406892020-05-20 11:20:09 -0700238 metric_events[matched_events++] = ev;
Ian Rogersdcc81be2020-09-17 13:18:07 -0700239
Ian Rogers24406892020-05-20 11:20:09 -0700240 if (matched_events == events_to_match)
241 break;
242 }
243
244 if (events_to_match != idnum) {
245 /* Add the first duration_time. */
246 evlist__for_each_entry(perf_evlist, ev) {
247 if (!strcmp(ev->name, "duration_time")) {
248 metric_events[matched_events++] = ev;
249 break;
250 }
Andi Kleenb18f3e32017-08-31 12:40:31 -0700251 }
252 }
Jin Yaof01642e2019-08-28 13:59:32 +0800253
Ian Rogers24406892020-05-20 11:20:09 -0700254 if (matched_events != idnum) {
Ian Rogersdcc81be2020-09-17 13:18:07 -0700255 /* Not a whole match */
Jin Yaof01642e2019-08-28 13:59:32 +0800256 return NULL;
257 }
258
259 metric_events[idnum] = NULL;
260
261 for (i = 0; i < idnum; i++) {
Kajol Jain58fc90f2020-02-21 15:41:21 +0530262 ev = metric_events[i];
Ian Rogersdcc81be2020-09-17 13:18:07 -0700263 /* Don't free the used events. */
Ian Rogers45db55f2020-05-20 00:28:08 -0700264 set_bit(ev->idx, evlist_used);
Ian Rogersdcc81be2020-09-17 13:18:07 -0700265 /*
266 * The metric leader points to the identically named event in
267 * metric_events.
268 */
269 ev->metric_leader = ev;
270 /*
271 * Mark two events with identical names in the same group (or
272 * globally) as being in use as uncore events may be duplicated
273 * for each pmu. Set the metric leader of such events to be the
274 * event that appears in metric_events.
275 */
276 evlist__for_each_entry_continue(perf_evlist, ev) {
277 /*
278 * If events are grouped then the search can terminate
279 * when then group is left.
280 */
281 if (!has_constraint &&
John Garryc2337d62020-12-04 19:10:12 +0800282 ev->leader != metric_events[i]->leader &&
283 !strcmp(ev->leader->pmu_name,
284 metric_events[i]->leader->pmu_name))
Ian Rogersdcc81be2020-09-17 13:18:07 -0700285 break;
286 if (!strcmp(metric_events[i]->name, ev->name)) {
287 set_bit(ev->idx, evlist_used);
288 ev->metric_leader = metric_events[i];
289 }
290 }
Jin Yaof01642e2019-08-28 13:59:32 +0800291 }
292
293 return metric_events[0];
Andi Kleenb18f3e32017-08-31 12:40:31 -0700294}
295
/*
 * For each parsed metric on @groups, locate its events in @perf_evlist,
 * attach a metric_expr to the leader's metric_event in
 * @metric_events_list, and finally remove any evlist events no metric
 * claimed.  Returns 0 or a negative errno; an individual metric that
 * cannot be resolved is skipped, not fatal.
 */
static int metricgroup__setup_events(struct list_head *groups,
				     bool metric_no_merge,
				     struct evlist *perf_evlist,
				     struct rblist *metric_events_list)
{
	struct metric_event *me;
	struct metric_expr *expr;
	int i = 0;
	int ret = 0;
	struct metric *m;
	struct evsel *evsel, *tmp;
	unsigned long *evlist_used;

	/* One bit per evlist entry: set when some metric uses the event. */
	evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
	if (!evlist_used)
		return -ENOMEM;

	list_for_each_entry (m, groups, nd) {
		struct evsel **metric_events;
		struct metric_ref *metric_refs = NULL;

		/* NULL-terminated array of the metric's events. */
		metric_events = calloc(sizeof(void *),
				       hashmap__size(&m->pctx.ids) + 1);
		if (!metric_events) {
			ret = -ENOMEM;
			break;
		}
		evsel = find_evsel_group(perf_evlist, &m->pctx,
					 metric_no_merge,
					 m->has_constraint, metric_events,
					 evlist_used);
		if (!evsel) {
			pr_debug("Cannot resolve %s: %s\n",
					m->metric_name, m->metric_expr);
			free(metric_events);
			continue;
		}
		for (i = 0; metric_events[i]; i++)
			metric_events[i]->collect_stat = true;
		me = metricgroup__lookup(metric_events_list, evsel, true);
		if (!me) {
			ret = -ENOMEM;
			free(metric_events);
			break;
		}
		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
			free(metric_events);
			break;
		}

		/*
		 * Collect and store collected nested expressions
		 * for metric processing.
		 */
		if (m->metric_refs_cnt) {
			struct metric_ref_node *ref;

			/* +1 for a zeroed sentinel entry at the end. */
			metric_refs = zalloc(sizeof(struct metric_ref) * (m->metric_refs_cnt + 1));
			if (!metric_refs) {
				ret = -ENOMEM;
				free(metric_events);
				free(expr);
				break;
			}

			i = 0;
			list_for_each_entry(ref, &m->metric_refs, list) {
				/*
				 * Intentionally passing just const char pointers,
				 * originally from 'struct pmu_event' object.
				 * We don't need to change them, so there's no
				 * need to create our own copy.
				 */
				metric_refs[i].metric_name = ref->metric_name;
				metric_refs[i].metric_expr = ref->metric_expr;
				i++;
			}
		};

		/* Ownership of metric_refs and metric_events moves to expr;
		 * metric_event_delete() frees them. */
		expr->metric_refs = metric_refs;
		expr->metric_expr = m->metric_expr;
		expr->metric_name = m->metric_name;
		expr->metric_unit = m->metric_unit;
		expr->metric_events = metric_events;
		expr->runtime = m->runtime;
		list_add(&expr->nd, &me->head);
	}

	/* Drop evlist events that no resolved metric claimed. */
	evlist__for_each_entry_safe(perf_evlist, tmp, evsel) {
		if (!test_bit(evsel->idx, evlist_used)) {
			evlist__remove(perf_evlist, evsel);
			evsel__delete(evsel);
		}
	}
	bitmap_free(evlist_used);

	return ret;
}
396
/*
 * Does the selector @list match the metric/group name @n?  "all"
 * matches everything; a NULL @n matches only the special "No_group"
 * selector; otherwise @list must occur in @n case-insensitively,
 * starting at the beginning or after ';'/' ', and ending at the end of
 * @n or before ';'.
 */
static bool match_metric(const char *n, const char *list)
{
	const char *hit;
	size_t len;
	bool starts_on_boundary, ends_on_boundary;

	if (!list)
		return false;
	if (strcmp(list, "all") == 0)
		return true;
	if (!n)
		return strcasecmp(list, "No_group") == 0;

	hit = strcasestr(n, list);
	if (!hit)
		return false;

	len = strlen(list);
	starts_on_boundary = (hit == n) || (hit[-1] == ';') || (hit[-1] == ' ');
	ends_on_boundary = (hit[len] == '\0') || (hit[len] == ';');
	return starts_on_boundary && ends_on_boundary;
}
417
Andi Kleen71b0acc2017-08-31 12:40:32 -0700418struct mep {
419 struct rb_node nd;
420 const char *name;
421 struct strlist *metrics;
422};
423
/* rblist comparator: order mep nodes lexicographically by group name. */
static int mep_cmp(struct rb_node *rb_node, const void *entry)
{
	struct mep *a = container_of(rb_node, struct mep, nd);
	struct mep *b = (struct mep *)entry;

	return strcmp(a->name, b->name);
}
431
/*
 * rblist constructor: copy the lookup key @entry, take a private copy of
 * the name and create an empty metrics strlist.  Returns NULL on any
 * allocation failure (partial allocations are unwound).
 */
static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
			       const void *entry)
{
	struct mep *me = malloc(sizeof(struct mep));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct mep));
	/* The key's name is caller-owned; own our copy. */
	me->name = strdup(me->name);
	if (!me->name)
		goto out_me;
	me->metrics = strlist__new(NULL, NULL);
	if (!me->metrics)
		goto out_name;
	return &me->nd;
out_name:
	zfree(&me->name);
out_me:
	free(me);
	return NULL;
}
453
/* Find the mep named @name in @groups, creating it if absent; NULL on OOM. */
static struct mep *mep_lookup(struct rblist *groups, const char *name)
{
	struct rb_node *nd;
	struct mep me = {
		.name = name
	};
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	rblist__add_node(groups, &me);
	/* The rblist allocated its own copy; look it up to return it. */
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	return NULL;
}
469
/* rblist destructor: release the metrics strlist, the name and the node. */
static void mep_delete(struct rblist *rl __maybe_unused,
		       struct rb_node *nd)
{
	struct mep *me = container_of(nd, struct mep, nd);

	strlist__delete(me->metrics);
	zfree(&me->name);
	free(me);
}
479
/* Print @metrics space-separated on one line (@raw) or one per line. */
static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
{
	struct str_node *sn;
	int n = 0;

	strlist__for_each_entry (sn, metrics) {
		if (raw)
			printf("%s%s", n > 0 ? " " : "", sn->s);
		else
			printf(" %s\n", sn->s);
		n++;
	}
	if (raw)
		putchar('\n');
}
495
/*
 * List metrics and/or metric groups from the CPU's pmu-events table.
 * @metrics/@metricgroups select what to print, @filter restricts group
 * names by substring, @raw prints space-separated names, @details also
 * prints each metric's expression.
 *
 * NOTE(review): several error paths here leak memory — the early
 * 'return's inside the loop leak 'mg'/'omg' and 'metriclist', and in the
 * @details branch the second asprintf() overwrites 's' without freeing
 * the first buffer.  Worth fixing upstream.
 */
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
			bool raw, bool details)
{
	struct pmu_events_map *map = perf_pmu__find_map(NULL);
	struct pmu_event *pe;
	int i;
	struct rblist groups;
	struct rb_node *node, *next;
	struct strlist *metriclist = NULL;

	if (!map)
		return;

	if (!metricgroups) {
		metriclist = strlist__new(NULL, NULL);
		if (!metriclist)
			return;
	}

	rblist__init(&groups);
	groups.node_new = mep_new;
	groups.node_cmp = mep_cmp;
	groups.node_delete = mep_delete;
	for (i = 0; ; i++) {
		const char *g;
		pe = &map->table[i];

		/* The all-empty entry terminates the table. */
		if (!pe->name && !pe->metric_group && !pe->metric_name)
			break;
		if (!pe->metric_expr)
			continue;
		g = pe->metric_group;
		if (!g && pe->metric_name) {
			if (pe->name)
				continue;
			g = "No_group";
		}
		if (g) {
			char *omg;
			char *mg = strdup(g);

			if (!mg)
				return;
			omg = mg;
			/* A metric may belong to several ';'-separated groups. */
			while ((g = strsep(&mg, ";")) != NULL) {
				struct mep *me;
				char *s;

				g = skip_spaces(g);
				if (*g == 0)
					g = "No_group";
				if (filter && !strstr(g, filter))
					continue;
				if (raw)
					s = (char *)pe->metric_name;
				else {
					if (asprintf(&s, "%s\n%*s%s]",
						     pe->metric_name, 8, "[", pe->desc) < 0)
						return;

					if (details) {
						if (asprintf(&s, "%s\n%*s%s]",
							     s, 8, "[", pe->metric_expr) < 0)
							return;
					}
				}

				if (!s)
					continue;

				if (!metricgroups) {
					strlist__add(metriclist, s);
				} else {
					me = mep_lookup(&groups, g);
					if (!me)
						continue;
					strlist__add(me->metrics, s);
				}

				/* strlist__add duplicated s; in raw mode s is
				 * the table's own string, so don't free it. */
				if (!raw)
					free(s);
			}
			free(omg);
		}
	}

	if (!filter || !rblist__empty(&groups)) {
		if (metricgroups && !raw)
			printf("\nMetric Groups:\n\n");
		else if (metrics && !raw)
			printf("\nMetrics:\n\n");
	}

	for (node = rb_first_cached(&groups.entries); node; node = next) {
		struct mep *me = container_of(node, struct mep, nd);

		if (metricgroups)
			printf("%s%s%s", me->name, metrics && !raw ? ":" : "", raw ? " " : "\n");
		if (metrics)
			metricgroup__print_strlist(me->metrics, raw);
		next = rb_next(node);
		rblist__remove_node(&groups, node);
	}
	if (!metricgroups)
		metricgroup__print_strlist(metriclist, raw);
	strlist__delete(metriclist);
}
603
Kan Liangf7426342020-02-24 13:59:21 -0800604static void metricgroup__add_metric_weak_group(struct strbuf *events,
Ian Rogersded80bd2020-05-15 15:17:32 -0700605 struct expr_parse_ctx *ctx)
Kan Liangf7426342020-02-24 13:59:21 -0800606{
Ian Rogersded80bd2020-05-15 15:17:32 -0700607 struct hashmap_entry *cur;
Ian Rogers4e21c132020-05-20 11:20:05 -0700608 size_t bkt;
609 bool no_group = true, has_duration = false;
Kan Liangf7426342020-02-24 13:59:21 -0800610
Ian Rogersded80bd2020-05-15 15:17:32 -0700611 hashmap__for_each_entry((&ctx->ids), cur, bkt) {
612 pr_debug("found event %s\n", (const char *)cur->key);
Kan Liangf7426342020-02-24 13:59:21 -0800613 /*
614 * Duration time maps to a software event and can make
615 * groups not count. Always use it outside a
616 * group.
617 */
Ian Rogersded80bd2020-05-15 15:17:32 -0700618 if (!strcmp(cur->key, "duration_time")) {
Ian Rogers4e21c132020-05-20 11:20:05 -0700619 has_duration = true;
Kan Liangf7426342020-02-24 13:59:21 -0800620 continue;
621 }
622 strbuf_addf(events, "%s%s",
Ian Rogers4e21c132020-05-20 11:20:05 -0700623 no_group ? "{" : ",",
Ian Rogersded80bd2020-05-15 15:17:32 -0700624 (const char *)cur->key);
Kan Liangf7426342020-02-24 13:59:21 -0800625 no_group = false;
626 }
Ian Rogers4e21c132020-05-20 11:20:05 -0700627 if (!no_group) {
Kan Liangf7426342020-02-24 13:59:21 -0800628 strbuf_addf(events, "}:W");
Ian Rogers4e21c132020-05-20 11:20:05 -0700629 if (has_duration)
630 strbuf_addf(events, ",duration_time");
631 } else if (has_duration)
632 strbuf_addf(events, "duration_time");
Kan Liangf7426342020-02-24 13:59:21 -0800633}
634
Kan Liangab483d82020-02-24 13:59:23 -0800635static void metricgroup__add_metric_non_group(struct strbuf *events,
Ian Rogersded80bd2020-05-15 15:17:32 -0700636 struct expr_parse_ctx *ctx)
Kan Liangab483d82020-02-24 13:59:23 -0800637{
Ian Rogersded80bd2020-05-15 15:17:32 -0700638 struct hashmap_entry *cur;
639 size_t bkt;
Ian Rogerse2ce1052020-05-20 11:20:11 -0700640 bool first = true;
Kan Liangab483d82020-02-24 13:59:23 -0800641
Ian Rogerse2ce1052020-05-20 11:20:11 -0700642 hashmap__for_each_entry((&ctx->ids), cur, bkt) {
643 if (!first)
644 strbuf_addf(events, ",");
645 strbuf_addf(events, "%s", (const char *)cur->key);
646 first = false;
647 }
Kan Liangab483d82020-02-24 13:59:23 -0800648}
649
/*
 * Warn that a metric group was split because of the NMI watchdog.  With
 * @foot false, warn about @name and remember it happened (static flag);
 * with @foot true, print the closing workaround hint only if a warning
 * was issued earlier in this run.
 */
static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Splitting metric group %s into standalone metrics.\n", name);
		violate_nmi_constraint = true;
		return;
	}

	if (!violate_nmi_constraint)
		return;

	pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
		   " echo 0 > /proc/sys/kernel/nmi_watchdog\n"
		   " perf stat ...\n"
		   " echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}
668
/*
 * Return true when @pe's NO_NMI_WATCHDOG constraint applies, i.e. the
 * NMI watchdog is currently enabled; also emits the split warning.
 */
static bool metricgroup__has_constraint(struct pmu_event *pe)
{
	if (!pe->metric_constraint)
		return false;

	if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
	    sysctl__nmi_watchdog_enabled()) {
		metricgroup___watchdog_constraint_hint(pe->metric_name, false);
		return true;
	}

	return false;
}
682
Kajol Jainf5a489d2020-09-07 12:11:32 +0530683int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
Kajol Jain1e1a8732020-04-02 02:03:37 +0530684{
685 return 1;
686}
687
Jiri Olsa119e5212020-07-19 20:13:20 +0200688static int __add_metric(struct list_head *metric_list,
Jiri Olsae7e1badd2020-07-19 20:13:08 +0200689 struct pmu_event *pe,
690 bool metric_no_group,
Jiri Olsa83de0b72020-07-19 20:13:09 +0200691 int runtime,
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200692 struct metric **mp,
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200693 struct expr_id *parent,
694 struct expr_ids *ids)
Kajol Jain47352ab2020-04-02 02:03:36 +0530695{
Jiri Olsa83de0b72020-07-19 20:13:09 +0200696 struct metric_ref_node *ref;
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200697 struct metric *m;
Kajol Jain47352ab2020-04-02 02:03:36 +0530698
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200699 if (*mp == NULL) {
Jiri Olsa83de0b72020-07-19 20:13:09 +0200700 /*
701 * We got in here for the parent group,
702 * allocate it and put it on the list.
703 */
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200704 m = zalloc(sizeof(*m));
705 if (!m)
Jiri Olsa83de0b72020-07-19 20:13:09 +0200706 return -ENOMEM;
Kajol Jain47352ab2020-04-02 02:03:36 +0530707
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200708 expr__ctx_init(&m->pctx);
709 m->metric_name = pe->metric_name;
710 m->metric_expr = pe->metric_expr;
711 m->metric_unit = pe->unit;
712 m->runtime = runtime;
713 m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
714 INIT_LIST_HEAD(&m->metric_refs);
715 m->metric_refs_cnt = 0;
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200716
717 parent = expr_ids__alloc(ids);
718 if (!parent) {
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200719 free(m);
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200720 return -EINVAL;
721 }
722
723 parent->id = strdup(pe->metric_name);
724 if (!parent->id) {
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200725 free(m);
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200726 return -ENOMEM;
727 }
Namhyung Kim6f47ed62020-09-15 12:18:17 +0900728 *mp = m;
Jiri Olsa83de0b72020-07-19 20:13:09 +0200729 } else {
730 /*
731 * We got here for the referenced metric, via the
732 * recursive metricgroup__add_metric call, add
733 * it to the parent group.
734 */
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200735 m = *mp;
Ian Rogersded80bd2020-05-15 15:17:32 -0700736
Jiri Olsa83de0b72020-07-19 20:13:09 +0200737 ref = malloc(sizeof(*ref));
738 if (!ref)
739 return -ENOMEM;
740
741 /*
742 * Intentionally passing just const char pointers,
743 * from 'pe' object, so they never go away. We don't
744 * need to change them, so there's no need to create
745 * our own copy.
746 */
747 ref->metric_name = pe->metric_name;
748 ref->metric_expr = pe->metric_expr;
749
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200750 list_add(&ref->list, &m->metric_refs);
751 m->metric_refs_cnt++;
Jiri Olsa83de0b72020-07-19 20:13:09 +0200752 }
753
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200754 /* Force all found IDs in metric to have us as parent ID. */
755 WARN_ON_ONCE(!parent);
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200756 m->pctx.parent = parent;
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200757
Jiri Olsa83de0b72020-07-19 20:13:09 +0200758 /*
759 * For both the parent and referenced metrics, we parse
760 * all the metric's IDs and add it to the parent context.
761 */
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200762 if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
Namhyung Kim6f47ed62020-09-15 12:18:17 +0900763 if (m->metric_refs_cnt == 0) {
764 expr__ctx_clear(&m->pctx);
765 free(m);
766 *mp = NULL;
767 }
Ian Rogersded80bd2020-05-15 15:17:32 -0700768 return -EINVAL;
769 }
770
Jiri Olsa83de0b72020-07-19 20:13:09 +0200771 /*
772 * We add new group only in the 'parent' call,
773 * so bail out for referenced metric case.
774 */
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200775 if (m->metric_refs_cnt)
Jiri Olsa83de0b72020-07-19 20:13:09 +0200776 return 0;
777
Jiri Olsa119e5212020-07-19 20:13:20 +0200778 if (list_empty(metric_list))
779 list_add(&m->nd, metric_list);
Ian Rogers6bf21022020-05-20 11:20:08 -0700780 else {
781 struct list_head *pos;
782
783 /* Place the largest groups at the front. */
Jiri Olsa119e5212020-07-19 20:13:20 +0200784 list_for_each_prev(pos, metric_list) {
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200785 struct metric *old = list_entry(pos, struct metric, nd);
Ian Rogers6bf21022020-05-20 11:20:08 -0700786
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200787 if (hashmap__size(&m->pctx.ids) <=
Ian Rogers6bf21022020-05-20 11:20:08 -0700788 hashmap__size(&old->pctx.ids))
789 break;
790 }
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200791 list_add(&m->nd, pos);
Ian Rogers6bf21022020-05-20 11:20:08 -0700792 }
Kajol Jain47352ab2020-04-02 02:03:36 +0530793
794 return 0;
795}
796
Jiri Olsace391942020-07-19 20:13:06 +0200797#define map_for_each_event(__pe, __idx, __map) \
798 for (__idx = 0, __pe = &__map->table[__idx]; \
799 __pe->name || __pe->metric_group || __pe->metric_name; \
800 __pe = &__map->table[++__idx])
801
802#define map_for_each_metric(__pe, __idx, __map, __metric) \
803 map_for_each_event(__pe, __idx, __map) \
804 if (__pe->metric_expr && \
805 (match_metric(__pe->metric_group, __metric) || \
806 match_metric(__pe->metric_name, __metric)))
807
Jiri Olsa83de0b72020-07-19 20:13:09 +0200808static struct pmu_event *find_metric(const char *metric, struct pmu_events_map *map)
809{
810 struct pmu_event *pe;
811 int i;
812
813 map_for_each_event(pe, i, map) {
814 if (match_metric(pe->metric_name, metric))
815 return pe;
816 }
817
818 return NULL;
819}
820
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200821static int recursion_check(struct metric *m, const char *id, struct expr_id **parent,
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200822 struct expr_ids *ids)
823{
824 struct expr_id_data *data;
825 struct expr_id *p;
826 int ret;
827
828 /*
829 * We get the parent referenced by 'id' argument and
830 * traverse through all the parent object IDs to check
831 * if we already processed 'id', if we did, it's recursion
832 * and we fail.
833 */
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200834 ret = expr__get_id(&m->pctx, id, &data);
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200835 if (ret)
836 return ret;
837
Ian Rogers29396cd2020-08-26 08:30:55 -0700838 p = expr_id_data__parent(data);
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200839
840 while (p->parent) {
841 if (!strcmp(p->id, id)) {
842 pr_err("failed: recursion detected for %s\n", id);
843 return -1;
844 }
845 p = p->parent;
846 }
847
848 /*
849 * If we are over the limit of static entris, the metric
850 * is too difficult/nested to process, fail as well.
851 */
852 p = expr_ids__alloc(ids);
853 if (!p) {
854 pr_err("failed: too many nested metrics\n");
855 return -EINVAL;
856 }
857
858 p->id = strdup(id);
Ian Rogers29396cd2020-08-26 08:30:55 -0700859 p->parent = expr_id_data__parent(data);
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200860 *parent = p;
861
862 return p->id ? 0 : -ENOMEM;
863}
864
/*
 * Forward declaration: add_metric() is defined below but is needed by
 * __resolve_metric(), which uses it to pull referenced metrics in.
 */
static int add_metric(struct list_head *metric_list,
		      struct pmu_event *pe,
		      bool metric_no_group,
		      struct metric **mp,
		      struct expr_id *parent,
		      struct expr_ids *ids);
Jiri Olsa83de0b72020-07-19 20:13:09 +0200871
/*
 * Resolve metric references inside @m's parsed expression.
 *
 * Every ID in @m's parse context is looked up in @map; when an ID names
 * another metric, that metric is materialized onto @metric_list via
 * add_metric() and the ID is deleted from @m's context.  Because
 * add_metric() can mutate the hashmap being iterated, the scan restarts
 * from the top after every substitution and only finishes once a full
 * pass finds no more metric IDs.
 *
 * Returns 0 on success or a negative error code (recursion detected,
 * allocation failure, ...).
 */
static int __resolve_metric(struct metric *m,
			    bool metric_no_group,
			    struct list_head *metric_list,
			    struct pmu_events_map *map,
			    struct expr_ids *ids)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool all;
	int ret;

	/*
	 * Iterate all the parsed IDs and if there's metric,
	 * add it to the context.
	 */
	do {
		all = true;
		hashmap__for_each_entry((&m->pctx.ids), cur, bkt) {
			struct expr_id *parent;
			struct pmu_event *pe;

			pe = find_metric(cur->key, map);
			if (!pe)
				continue;

			/* Refuse self/cyclic references before recursing. */
			ret = recursion_check(m, cur->key, &parent, ids);
			if (ret)
				return ret;

			all = false;
			/* The metric key itself needs to go out.. */
			expr__del_id(&m->pctx, cur->key);

			/* ... and it gets resolved to the parent context. */
			ret = add_metric(metric_list, pe, metric_no_group, &m, parent, ids);
			if (ret)
				return ret;

			/*
			 * We added new metric to hashmap, so we need
			 * to break the iteration and start over.
			 */
			break;
		}
	} while (!all);

	return 0;
}
920
921static int resolve_metric(bool metric_no_group,
922 struct list_head *metric_list,
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200923 struct pmu_events_map *map,
924 struct expr_ids *ids)
Jiri Olsa83de0b72020-07-19 20:13:09 +0200925{
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200926 struct metric *m;
Jiri Olsa83de0b72020-07-19 20:13:09 +0200927 int err;
928
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200929 list_for_each_entry(m, metric_list, nd) {
930 err = __resolve_metric(m, metric_no_group, metric_list, map, ids);
Jiri Olsa83de0b72020-07-19 20:13:09 +0200931 if (err)
932 return err;
933 }
934 return 0;
935}
936
Jiri Olsa119e5212020-07-19 20:13:20 +0200937static int add_metric(struct list_head *metric_list,
Jiri Olsa83de0b72020-07-19 20:13:09 +0200938 struct pmu_event *pe,
939 bool metric_no_group,
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200940 struct metric **m,
Jiri Olsaf6fb0962020-07-19 20:13:16 +0200941 struct expr_id *parent,
942 struct expr_ids *ids)
Jiri Olsa83de0b72020-07-19 20:13:09 +0200943{
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200944 struct metric *orig = *m;
Jiri Olsaa29c1642020-07-19 20:13:07 +0200945 int ret = 0;
946
947 pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
948
949 if (!strstr(pe->metric_expr, "?")) {
Jiri Olsa119e5212020-07-19 20:13:20 +0200950 ret = __add_metric(metric_list, pe, metric_no_group, 1, m, parent, ids);
Jiri Olsaa29c1642020-07-19 20:13:07 +0200951 } else {
952 int j, count;
953
Kajol Jainf5a489d2020-09-07 12:11:32 +0530954 count = arch_get_runtimeparam(pe);
Jiri Olsaa29c1642020-07-19 20:13:07 +0200955
956 /* This loop is added to create multiple
957 * events depend on count value and add
Jiri Olsa119e5212020-07-19 20:13:20 +0200958 * those events to metric_list.
Jiri Olsaa29c1642020-07-19 20:13:07 +0200959 */
960
Jiri Olsaa0c05b32020-07-19 20:13:19 +0200961 for (j = 0; j < count && !ret; j++, *m = orig)
Jiri Olsa119e5212020-07-19 20:13:20 +0200962 ret = __add_metric(metric_list, pe, metric_no_group, j, m, parent, ids);
Jiri Olsaa29c1642020-07-19 20:13:07 +0200963 }
964
965 return ret;
966}
967
/*
 * Add every metric matching @metric (a metric or metric-group name) in
 * @map to @metric_list and render their event groups into @events.
 *
 * For each matching pmu_event a struct metric is built (add_metric()),
 * then metrics referenced from its expression are resolved recursively
 * (resolve_metric()).  Finally the events of all collected metrics are
 * appended to @events, comma separated: plain events for constrained
 * metrics, weak groups otherwise.
 *
 * Returns 0 on success, -EINVAL when nothing matched, or another
 * negative error code.  Even on failure the partially built metrics are
 * spliced onto @metric_list so the caller can release them.
 */
static int metricgroup__add_metric(const char *metric, bool metric_no_group,
				   struct strbuf *events,
				   struct list_head *metric_list,
				   struct pmu_events_map *map)
{
	struct expr_ids ids = { .cnt = 0, };
	struct pmu_event *pe;
	struct metric *m;
	LIST_HEAD(list);
	int i, ret;
	bool has_match = false;

	map_for_each_metric(pe, i, map, metric) {
		has_match = true;
		m = NULL;

		ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
		if (ret)
			goto out;

		/*
		 * Process any possible referenced metrics
		 * included in the expression.
		 */
		ret = resolve_metric(metric_no_group,
				     &list, map, &ids);
		if (ret)
			goto out;
	}

	/* End of pmu events. */
	if (!has_match) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(m, &list, nd) {
		if (events->len > 0)
			strbuf_addf(events, ",");

		if (m->has_constraint) {
			metricgroup__add_metric_non_group(events,
							  &m->pctx);
		} else {
			metricgroup__add_metric_weak_group(events,
							   &m->pctx);
		}
	}

out:
	/*
	 * add to metric_list so that they can be released
	 * even if it's failed
	 */
	list_splice(&list, metric_list);
	expr_ids__exit(&ids);
	return ret;
}
1026
Ian Rogers05530a72020-05-20 11:20:10 -07001027static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
1028 struct strbuf *events,
Jiri Olsa119e5212020-07-19 20:13:20 +02001029 struct list_head *metric_list,
Jiri Olsa13813962020-06-09 12:57:47 -03001030 struct pmu_events_map *map)
Andi Kleenb18f3e32017-08-31 12:40:31 -07001031{
1032 char *llist, *nlist, *p;
1033 int ret = -EINVAL;
1034
1035 nlist = strdup(list);
1036 if (!nlist)
1037 return -ENOMEM;
1038 llist = nlist;
Andi Kleen411bc312017-09-14 13:57:35 -07001039
1040 strbuf_init(events, 100);
1041 strbuf_addf(events, "%s", "");
1042
Andi Kleenb18f3e32017-08-31 12:40:31 -07001043 while ((p = strsep(&llist, ",")) != NULL) {
Ian Rogers05530a72020-05-20 11:20:10 -07001044 ret = metricgroup__add_metric(p, metric_no_group, events,
Jiri Olsa119e5212020-07-19 20:13:20 +02001045 metric_list, map);
Andi Kleenb18f3e32017-08-31 12:40:31 -07001046 if (ret == -EINVAL) {
1047 fprintf(stderr, "Cannot find metric or group `%s'\n",
1048 p);
1049 break;
1050 }
1051 }
1052 free(nlist);
Kan Liangab483d82020-02-24 13:59:23 -08001053
1054 if (!ret)
1055 metricgroup___watchdog_constraint_hint(NULL, true);
1056
Andi Kleenb18f3e32017-08-31 12:40:31 -07001057 return ret;
1058}
1059
Jiri Olsaa0c05b32020-07-19 20:13:19 +02001060static void metric__free_refs(struct metric *metric)
Jiri Olsa83de0b72020-07-19 20:13:09 +02001061{
1062 struct metric_ref_node *ref, *tmp;
1063
Jiri Olsaa0c05b32020-07-19 20:13:19 +02001064 list_for_each_entry_safe(ref, tmp, &metric->metric_refs, list) {
Jiri Olsa83de0b72020-07-19 20:13:09 +02001065 list_del(&ref->list);
1066 free(ref);
1067 }
1068}
1069
Jiri Olsa119e5212020-07-19 20:13:20 +02001070static void metricgroup__free_metrics(struct list_head *metric_list)
Andi Kleenb18f3e32017-08-31 12:40:31 -07001071{
Jiri Olsaa0c05b32020-07-19 20:13:19 +02001072 struct metric *m, *tmp;
Andi Kleenb18f3e32017-08-31 12:40:31 -07001073
Jiri Olsa119e5212020-07-19 20:13:20 +02001074 list_for_each_entry_safe (m, tmp, metric_list, nd) {
Jiri Olsaa0c05b32020-07-19 20:13:19 +02001075 metric__free_refs(m);
1076 expr__ctx_clear(&m->pctx);
1077 list_del_init(&m->nd);
1078 free(m);
Andi Kleenb18f3e32017-08-31 12:40:31 -07001079 }
1080}
1081
/*
 * Parse the metric/metric-group list @str, add the required events to
 * @perf_evlist and record the evsel<->metric mapping in @metric_events.
 *
 * @fake_pmu is non-NULL only from the test entry point, letting the
 * event parser accept events that do not exist on this machine.
 *
 * Returns 0 on success or a negative error code.
 */
static int parse_groups(struct evlist *perf_evlist, const char *str,
			bool metric_no_group,
			bool metric_no_merge,
			struct perf_pmu *fake_pmu,
			struct rblist *metric_events,
			struct pmu_events_map *map)
{
	struct parse_events_error parse_error;
	struct strbuf extra_events;
	LIST_HEAD(metric_list);
	int ret;

	if (metric_events->nr_entries == 0)
		metricgroup__rblist_init(metric_events);
	/*
	 * NOTE(review): extra_events is expected to be initialized inside
	 * metricgroup__add_metric_list().  If that helper can return
	 * before its strbuf_init() call (e.g. strdup() failure), the
	 * strbuf_release() at 'out' below would act on an uninitialized
	 * strbuf — verify the callee's init ordering.
	 */
	ret = metricgroup__add_metric_list(str, metric_no_group,
					   &extra_events, &metric_list, map);
	if (ret)
		goto out;
	pr_debug("adding %s\n", extra_events.buf);
	bzero(&parse_error, sizeof(parse_error));
	ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
	if (ret) {
		parse_events_print_error(&parse_error, extra_events.buf);
		goto out;
	}
	ret = metricgroup__setup_events(&metric_list, metric_no_merge,
					perf_evlist, metric_events);
out:
	/* Release metrics and the event string on all paths. */
	metricgroup__free_metrics(&metric_list);
	strbuf_release(&extra_events);
	return ret;
}
Thomas Richter742d92f2018-06-26 09:17:01 +02001114
Jiri Olsa8b4468a2020-06-02 23:47:33 +02001115int metricgroup__parse_groups(const struct option *opt,
1116 const char *str,
1117 bool metric_no_group,
1118 bool metric_no_merge,
1119 struct rblist *metric_events)
1120{
1121 struct evlist *perf_evlist = *(struct evlist **)opt->value;
Jiri Olsa13813962020-06-09 12:57:47 -03001122 struct pmu_events_map *map = perf_pmu__find_map(NULL);
1123
1124 if (!map)
1125 return 0;
Jiri Olsa8b4468a2020-06-02 23:47:33 +02001126
1127 return parse_groups(perf_evlist, str, metric_no_group,
Jiri Olsa13813962020-06-09 12:57:47 -03001128 metric_no_merge, NULL, metric_events, map);
Jiri Olsa8b4468a2020-06-02 23:47:33 +02001129}
1130
Jiri Olsaf78ac002020-06-02 23:47:36 +02001131int metricgroup__parse_groups_test(struct evlist *evlist,
1132 struct pmu_events_map *map,
1133 const char *str,
1134 bool metric_no_group,
1135 bool metric_no_merge,
1136 struct rblist *metric_events)
1137{
1138 return parse_groups(evlist, str, metric_no_group,
1139 metric_no_merge, &perf_pmu__fake, metric_events, map);
1140}
1141
Thomas Richter742d92f2018-06-26 09:17:01 +02001142bool metricgroup__has_metric(const char *metric)
1143{
1144 struct pmu_events_map *map = perf_pmu__find_map(NULL);
1145 struct pmu_event *pe;
1146 int i;
1147
1148 if (!map)
1149 return false;
1150
1151 for (i = 0; ; i++) {
1152 pe = &map->table[i];
1153
1154 if (!pe->name && !pe->metric_group && !pe->metric_name)
1155 break;
1156 if (!pe->metric_expr)
1157 continue;
1158 if (match_metric(pe->metric_name, metric))
1159 return true;
1160 }
1161 return false;
1162}
/*
 * Duplicate the metric_event tree @old_metric_events into
 * @new_metric_events, re-binding every evsel reference to the evsel
 * with the same index in @evlist (used for per-cgroup copies of the
 * original event list; @cgrp is for debug logging only).
 *
 * Only the container structures are duplicated: the metric_expr string
 * fields (metric_expr/metric_name/metric_unit) and the entries of the
 * copied metric_refs array are shared with the old tree, so the old
 * tree must outlive the new one.
 *
 * Returns 0 on success, -EINVAL when a matching evsel cannot be found,
 * -ENOMEM on allocation failure.  On failure, expressions copied so far
 * remain attached to @new_metric_events.
 */
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	unsigned i;

	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
		struct rb_node *nd;
		struct metric_event *old_me, *new_me;
		struct metric_expr *old_expr, *new_expr;
		struct evsel *evsel;
		size_t alloc_size;
		int idx, nr;

		nd = rblist__entry(old_metric_events, i);
		old_me = container_of(nd, struct metric_event, nd);

		/* Re-bind the leader evsel by index into the new evlist. */
		evsel = evlist__find_evsel(evlist, old_me->evsel->idx);
		if (!evsel)
			return -EINVAL;
		/* create=true: allocate the node in the new rblist. */
		new_me = metricgroup__lookup(new_metric_events, evsel, true);
		if (!new_me)
			return -ENOMEM;

		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
			 cgrp ? cgrp->name : "root", evsel->name, evsel->idx);

		list_for_each_entry(old_expr, &old_me->head, nd) {
			new_expr = malloc(sizeof(*new_expr));
			if (!new_expr)
				return -ENOMEM;

			/* Shallow copies: strings stay owned by old tree. */
			new_expr->metric_expr = old_expr->metric_expr;
			new_expr->metric_name = old_expr->metric_name;
			new_expr->metric_unit = old_expr->metric_unit;
			new_expr->runtime = old_expr->runtime;

			if (old_expr->metric_refs) {
				/* calculate number of metric_events */
				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
					continue;
				alloc_size = sizeof(*new_expr->metric_refs);
				/* +1 keeps the NULL-name terminator slot. */
				new_expr->metric_refs = calloc(nr + 1, alloc_size);
				if (!new_expr->metric_refs) {
					free(new_expr);
					return -ENOMEM;
				}

				memcpy(new_expr->metric_refs, old_expr->metric_refs,
				       nr * alloc_size);
			} else {
				new_expr->metric_refs = NULL;
			}

			/* calculate number of metric_events */
			for (nr = 0; old_expr->metric_events[nr]; nr++)
				continue;
			alloc_size = sizeof(*new_expr->metric_events);
			new_expr->metric_events = calloc(nr + 1, alloc_size);
			if (!new_expr->metric_events) {
				free(new_expr->metric_refs);
				free(new_expr);
				return -ENOMEM;
			}

			/* copy evsel in the same position */
			for (idx = 0; idx < nr; idx++) {
				evsel = old_expr->metric_events[idx];
				evsel = evlist__find_evsel(evlist, evsel->idx);
				if (evsel == NULL) {
					free(new_expr->metric_events);
					free(new_expr->metric_refs);
					free(new_expr);
					return -EINVAL;
				}
				new_expr->metric_events[idx] = evsel;
			}

			list_add(&new_expr->nd, &new_me->head);
		}
	}
	return 0;
}