// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include <linux/zalloc.h>

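/*
 * Running statistics, maintained with Welford's online algorithm:
 * mean and M2 (the sum of squared deviations from the mean) are
 * updated incrementally, so variance and stddev can be derived later
 * without keeping every sample around.
 */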
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}

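/*
 * Map an evsel to a PERF_STAT_EVSEL_ID__* value by matching its name
 * against the table below, so that later stat code can recognize
 * special events (transaction, topdown, SMI, APERF) by id instead of
 * by string comparison.
 */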
bool __perf_evsel_stat__is(struct evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID

static void perf_stat_evsel_id_init(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}

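/*
 * Per-evsel stat private data: ps->res_stats[] keeps running stats for
 * the aggregated value, enabled time and running time, in that order
 * (matching the layout of perf_counts_values).
 */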
static void perf_evsel__reset_stat_priv(struct evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->stats;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}

static int perf_evsel__alloc_stat_priv(struct evsel *evsel)
{
	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->stats == NULL)
		return -ENOMEM;
	perf_evsel__reset_stat_priv(evsel);
	return 0;
}

static void perf_evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps)
		zfree(&ps->group_data);
	zfree(&evsel->stats);
}

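/*
 * prev_raw_counts remembers the previously read counter values so that
 * deltas can be computed from cumulative reads (see
 * perf_evsel__compute_deltas()). It is only allocated when the caller
 * asks for it via alloc_raw.
 */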
static int perf_evsel__alloc_prev_raw_counts(struct evsel *evsel,
					     int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void perf_evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static void perf_evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts) {
		evsel->prev_raw_counts->aggr.val = 0;
		evsel->prev_raw_counts->aggr.ena = 0;
		evsel->prev_raw_counts->aggr.run = 0;
	}
}

static int perf_evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
	int ncpus = perf_evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);

	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}

int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}

void perf_evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

void perf_evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel);
	}
}

void perf_evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__reset_prev_raw_counts(evsel);
}

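/*
 * Events marked ->per_pkg are counted once per processor package.
 * per_pkg_mask records which packages (sockets) have already
 * contributed a value during the current read, so counts from other
 * CPUs in the same package are skipped.
 */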
static void zero_per_pkg(struct evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, cpu__max_cpu());
}

static int check_per_pkg(struct evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(cpu__max_cpu());
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * we do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL);
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}

static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_DIE:
	case AGGR_SOCKET:
	case AGGR_NONE:
		if (!evsel->snapshot)
			perf_evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
			perf_stat__update_shadow_stats(evsel, count->val,
						       cpu, &rt_stat);
		}

		if (config->aggr_mode == AGGR_THREAD) {
			if (config->stats)
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &config->stats[thread]);
			else
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &rt_stat);
		}
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		aggr->ena += count->ena;
		aggr->run += count->run;
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->core.system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}

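/*
 * Aggregate one event's per-cpu/per-thread counts according to
 * config->aggr_mode. For AGGR_GLOBAL the summed val/ena/run triple is
 * also fed into ps->res_stats[], from which averages over repeated
 * runs are derived.
 */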
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	/*
	 * We calculate the counter's data every interval, and the
	 * display code shows the ps->res_stats avg value. We need to
	 * zero the stats for interval mode, otherwise overall running
	 * averages will be shown for each interval.
	 */
	if (config->interval)
		init_stats(ps->res_stats);

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		perf_evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);

	return 0;
}

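/*
 * Handler for PERF_RECORD_STAT events: resolve the evsel from the id
 * carried in the record and store its val/ena/run triple at the
 * recorded cpu/thread position.
 */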
int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = perf_evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}

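/* Debug printers for the stat record types, used when dumping raw events. */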
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		      st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		      st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale %d\n", sc.scale);
	ret += fprintf(fp, "... interval %u\n", sc.interval);

	return ret;
}

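/*
 * Set up the perf_event_attr of an evsel for counting (as opposed to
 * sampling) and open it, either per cpu or per thread depending on
 * the target.
 */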
int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel->leader;

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of a non-trivial group, let's enable
	 * the group read (for the leader) and ID retrieval for all
	 * members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;

	attr->inherit = !config->no_inherit;

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	/*
	 * Disable all counters initially, they will be enabled
	 * either manually by us or by the kernel via enable_on_exec
	 * set later.
	 */
	if (perf_evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(target) && !config->initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return perf_evsel__open_per_cpu(evsel, evsel__cpus(evsel));

	return perf_evsel__open_per_thread(evsel, evsel->core.threads);
}
Jiri Olsad09cefd2018-08-30 08:32:17 +0200513}