blob: 46a3f4ab13715858591692688c7f3aca1322c79a [file] [log] [blame]
Thomas Gleixner91007042019-05-29 07:12:25 -07001// SPDX-License-Identifier: GPL-2.0-only
Ingo Molnarddcacfa2009-04-20 15:37:32 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-stat.c
4 *
5 * Builtin stat command: Give a precise performance counters summary
6 * overview about any workload, CPU or specific PID.
7 *
8 * Sample output:
Ingo Molnarddcacfa2009-04-20 15:37:32 +02009
Ingo Molnar2cba3ff2011-05-19 13:30:56 +020010 $ perf stat ./hackbench 10
Ingo Molnarddcacfa2009-04-20 15:37:32 +020011
Ingo Molnar2cba3ff2011-05-19 13:30:56 +020012 Time: 0.118
Ingo Molnarddcacfa2009-04-20 15:37:32 +020013
Ingo Molnar2cba3ff2011-05-19 13:30:56 +020014 Performance counter stats for './hackbench 10':
Ingo Molnarddcacfa2009-04-20 15:37:32 +020015
Ingo Molnar2cba3ff2011-05-19 13:30:56 +020016 1708.761321 task-clock # 11.037 CPUs utilized
17 41,190 context-switches # 0.024 M/sec
18 6,735 CPU-migrations # 0.004 M/sec
19 17,318 page-faults # 0.010 M/sec
20 5,205,202,243 cycles # 3.046 GHz
21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
23 2,603,501,247 instructions # 0.50 insns per cycle
24 # 1.48 stalled cycles per insn
25 484,357,498 branches # 283.455 M/sec
26 6,388,934 branch-misses # 1.32% of all branches
27
28 0.154822978 seconds time elapsed
Ingo Molnarddcacfa2009-04-20 15:37:32 +020029
Ingo Molnar52425192009-05-26 09:17:18 +020030 *
Ingo Molnar2cba3ff2011-05-19 13:30:56 +020031 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
Ingo Molnar52425192009-05-26 09:17:18 +020032 *
33 * Improvements and fixes by:
34 *
35 * Arjan van de Ven <arjan@linux.intel.com>
36 * Yanmin Zhang <yanmin.zhang@intel.com>
37 * Wu Fengguang <fengguang.wu@intel.com>
38 * Mike Galbraith <efault@gmx.de>
39 * Paul Mackerras <paulus@samba.org>
Jaswinder Singh Rajput6e750a8f2009-06-27 03:02:07 +053040 * Jaswinder Singh Rajput <jaswinder@kernel.org>
Ingo Molnarddcacfa2009-04-20 15:37:32 +020041 */
42
Ingo Molnar16f762a2009-05-27 09:10:38 +020043#include "builtin.h"
Arnaldo Carvalho de Meloc1a604d2019-08-29 15:20:59 -030044#include "perf.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030045#include "util/cgroup.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060046#include <subcmd/parse-options.h>
Ingo Molnar52425192009-05-26 09:17:18 +020047#include "util/parse-events.h"
Andi Kleen4cabc3d2013-08-21 16:47:26 -070048#include "util/pmu.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020049#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020050#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020051#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020052#include "util/debug.h"
Ingo Molnara5d243d2011-04-27 05:39:24 +020053#include "util/color.h"
Xiao Guangrong0007ece2012-09-17 16:31:14 +080054#include "util/stat.h"
Liming Wang60666c62009-12-31 16:05:50 +080055#include "util/header.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110056#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020057#include "util/thread_map.h"
Jiri Olsad8095602015-08-07 12:51:03 +020058#include "util/counts.h"
Kan Liang687986b2020-09-11 07:48:05 -070059#include "util/topdown.h"
Jiri Olsa4979d0c2015-11-05 15:40:46 +010060#include "util/session.h"
Jiri Olsaba6039b62015-11-05 15:40:55 +010061#include "util/tool.h"
Arnaldo Carvalho de Meloa0675582017-04-17 16:51:59 -030062#include "util/string2.h"
Andi Kleenb18f3e32017-08-31 12:40:31 -070063#include "util/metricgroup.h"
Arnaldo Carvalho de Meloea49e012019-09-18 11:36:13 -030064#include "util/synthetic-events.h"
Arnaldo Carvalho de Meloaeb00b12019-08-22 15:40:29 -030065#include "util/target.h"
Arnaldo Carvalho de Melof3711022019-08-29 15:16:27 -030066#include "util/time-utils.h"
Jiri Olsa9660e082018-06-07 00:15:06 +020067#include "util/top.h"
Andi Kleen4804e012019-11-20 16:15:19 -080068#include "util/affinity.h"
Stephane Eranian70943492020-05-05 11:29:43 -070069#include "util/pfm.h"
Song Liufa853c42020-12-29 13:42:14 -080070#include "util/bpf_counter.h"
Jiri Olsaba6039b62015-11-05 15:40:55 +010071#include "asm/bug.h"
Ingo Molnarddcacfa2009-04-20 15:37:32 +020072
Arnaldo Carvalho de Melobd48c632016-08-05 15:40:30 -030073#include <linux/time64.h>
Arnaldo Carvalho de Melo7f7c5362019-07-04 11:32:27 -030074#include <linux/zalloc.h>
Andi Kleen44b1e602016-05-30 12:49:42 -030075#include <api/fs/fs.h>
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030076#include <errno.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030077#include <signal.h>
Peter Zijlstra1f16c572012-10-23 13:40:14 +020078#include <stdlib.h>
Ingo Molnarddcacfa2009-04-20 15:37:32 +020079#include <sys/prctl.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030080#include <inttypes.h>
Stephane Eranian5af52b52010-05-18 15:00:01 +020081#include <locale.h>
Andi Kleene3b03b62016-05-05 16:04:03 -070082#include <math.h>
Arnaldo Carvalho de Melo7a8ef4c2017-04-19 20:57:47 -030083#include <sys/types.h>
84#include <sys/stat.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030085#include <sys/wait.h>
Arnaldo Carvalho de Melo7a8ef4c2017-04-19 20:57:47 -030086#include <unistd.h>
Jiri Olsa0ce2da12018-06-05 14:13:13 +020087#include <sys/time.h>
88#include <sys/resource.h>
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +053089#include <linux/err.h>
Peter Zijlstra16c8a102009-05-05 17:50:27 +020090
Arnaldo Carvalho de Melo3052ba52019-06-25 17:27:31 -030091#include <linux/ctype.h>
Jiri Olsa453fa032019-07-21 13:24:43 +020092#include <perf/evlist.h>
Arnaldo Carvalho de Melo3d689ed2017-04-17 16:10:49 -030093
Stephane Eraniand7470b62010-12-01 18:49:05 +020094#define DEFAULT_SEPARATOR " "
Kan Liangdaefd0b2017-05-26 12:05:38 -070095#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
Stephane Eraniand7470b62010-12-01 18:49:05 +020096
Jiri Olsad4f63a42015-06-26 11:29:26 +020097static void print_counters(struct timespec *ts, int argc, const char **argv);
Stephane Eranian13370a92013-01-29 12:47:44 +010098
/* Default events used for perf stat -T */
static const char *transaction_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/,"
	"cpu/el-start/,"
	"cpu/cycles-ct/"
	"}"
};

/* More limited version when the CPU does not have all events. */
static const char * transaction_limited_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/"
	"}"
};

/* Raw slot/bubble events used for --topdown; NULL-terminated list. */
static const char * topdown_attrs[] = {
	"topdown-total-slots",
	"topdown-slots-retired",
	"topdown-recovery-bubbles",
	"topdown-fetch-bubbles",
	"topdown-slots-issued",
	NULL,
};

/*
 * Topdown level-1 metric events; NULL-terminated.
 * NOTE(review): availability depends on CPU/PMU support — the caller
 * presumably falls back to topdown_attrs when these are missing.
 */
static const char *topdown_metric_attrs[] = {
	"slots",
	"topdown-retiring",
	"topdown-bad-spec",
	"topdown-fe-bound",
	"topdown-be-bound",
	NULL,
};

/* Event group used to estimate SMI cost (--smi-cost). */
static const char *smi_cost_attrs = {
	"{"
	"msr/aperf/,"
	"msr/smi/,"
	"cycles"
	"}"
};
148
/* The events being counted, built from the command line / defaults. */
static struct evlist	*evsel_list;

/* What to monitor (pid/tid/cpu/uid); UINT_MAX uid means "no uid filter". */
static struct target target = {
	.uid	= UINT_MAX,
};

#define METRIC_ONLY_LEN 20

/* Command-line driven state, set during option parsing. */
static volatile pid_t		child_pid			= -1;
static int			detailed_run			=  0;
static bool			transaction_run;
static bool			topdown_run			= false;
static bool			smi_cost			= false;
static bool			smi_reset			= false;
static int			big_num_opt			=  -1;
static bool			group				= false;
static const char		*pre_cmd			= NULL;
static const char		*post_cmd			= NULL;
static bool			sync_run			= false;
static bool			forever				= false;
static bool			force_metric_only		= false;
static struct timespec		ref_time;	/* reference for interval deltas */
static bool			append_file;
static bool			interval_count;
static const char		*output_name;
static int			output_fd;

/* State for "perf stat record": writing counter data to a perf.data file. */
struct perf_stat {
	bool			 record;	/* true when recording */
	struct perf_data	 data;		/* output perf.data handle */
	struct perf_session	*session;
	u64			 bytes_written;
	struct perf_tool	 tool;
	bool			 maps_allocated;
	struct perf_cpu_map	*cpus;
	struct perf_thread_map	*threads;
	enum aggr_mode		 aggr_mode;
};

static struct perf_stat		perf_stat;
#define STAT_RECORD		perf_stat.record

/* Set by the signal handlers / main loop to request termination. */
static volatile int done = 0;

static struct perf_stat_config stat_config = {
	.aggr_mode		= AGGR_GLOBAL,
	.scale			= true,
	.unit_width		= 4, /* strlen("unit") */
	.run_count		= 1,
	.metric_only_len	= METRIC_ONLY_LEN,
	.walltime_nsecs_stats	= &walltime_nsecs_stats,
	.big_num		= true,
	.ctl_fd			= -1,	/* control fds disabled by default */
	.ctl_fd_ack		= -1
};
204
Jiri Olsaa9a179022020-06-02 12:17:36 +0200205static bool cpus_map_matched(struct evsel *a, struct evsel *b)
206{
207 if (!a->core.cpus && !b->core.cpus)
208 return true;
209
210 if (!a->core.cpus || !b->core.cpus)
211 return false;
212
213 if (a->core.cpus->nr != b->core.cpus->nr)
214 return false;
215
216 for (int i = 0; i < a->core.cpus->nr; i++) {
217 if (a->core.cpus->map[i] != b->core.cpus->map[i])
218 return false;
219 }
220
221 return true;
222}
223
224static void evlist__check_cpu_maps(struct evlist *evlist)
225{
226 struct evsel *evsel, *pos, *leader;
227 char buf[1024];
228
229 evlist__for_each_entry(evlist, evsel) {
230 leader = evsel->leader;
231
232 /* Check that leader matches cpus with each member. */
233 if (leader == evsel)
234 continue;
235 if (cpus_map_matched(leader, evsel))
236 continue;
237
238 /* If there's mismatch disable the group and warn user. */
239 WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n");
240 evsel__group_desc(leader, buf, sizeof(buf));
241 pr_warning(" %s\n", buf);
242
243 if (verbose) {
244 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
245 pr_warning(" %s: %s\n", leader->name, buf);
246 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
247 pr_warning(" %s: %s\n", evsel->name, buf);
248 }
249
250 for_each_group_evsel(pos, leader) {
251 pos->leader = pos;
252 pos->core.nr_members = 0;
253 }
254 evsel->leader->core.nr_members = 0;
255 }
256}
257
Stephane Eranian13370a92013-01-29 12:47:44 +0100258static inline void diff_timespec(struct timespec *r, struct timespec *a,
259 struct timespec *b)
260{
261 r->tv_sec = a->tv_sec - b->tv_sec;
262 if (a->tv_nsec < b->tv_nsec) {
Arnaldo Carvalho de Melo310ebb92016-08-08 14:57:04 -0300263 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
Stephane Eranian13370a92013-01-29 12:47:44 +0100264 r->tv_sec--;
265 } else {
266 r->tv_nsec = a->tv_nsec - b->tv_nsec ;
267 }
268}
269
Jiri Olsa254ecbc72015-06-26 11:29:13 +0200270static void perf_stat__reset_stats(void)
271{
Jin Yao56739442017-12-05 22:03:07 +0800272 int i;
273
Arnaldo Carvalho de Melo53f5e902020-11-30 09:31:04 -0300274 evlist__reset_stats(evsel_list);
Jiri Olsaf87027b2015-06-03 16:25:59 +0200275 perf_stat__reset_shadow_stats();
Jin Yao56739442017-12-05 22:03:07 +0800276
277 for (i = 0; i < stat_config.stats_num; i++)
278 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
Jiri Olsa1eda3b22015-06-03 16:25:55 +0200279}
280
Jiri Olsa8b99b1a2015-11-05 15:40:48 +0100281static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
Jiri Olsa4979d0c2015-11-05 15:40:46 +0100282 union perf_event *event,
283 struct perf_sample *sample __maybe_unused,
284 struct machine *machine __maybe_unused)
285{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100286 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
Jiri Olsa8b99b1a2015-11-05 15:40:48 +0100287 pr_err("failed to write perf data, error: %m\n");
288 return -1;
289 }
290
291 perf_stat.bytes_written += event->header.size;
292 return 0;
Jiri Olsa4979d0c2015-11-05 15:40:46 +0100293}
294
Jiri Olsa1975d362015-11-05 15:40:56 +0100295static int write_stat_round_event(u64 tm, u64 type)
Jiri Olsa7aad0c32015-11-05 15:40:52 +0100296{
Jiri Olsa1975d362015-11-05 15:40:56 +0100297 return perf_event__synthesize_stat_round(NULL, tm, type,
Jiri Olsa7aad0c32015-11-05 15:40:52 +0100298 process_synthesized_event,
299 NULL);
300}
301
302#define WRITE_STAT_ROUND_EVENT(time, interval) \
303 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
304
Jiri Olsa8cd36f32019-09-02 22:04:12 +0200305#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
Jiri Olsa5a6ea812015-11-05 15:40:51 +0100306
Arnaldo Carvalho de Meloddc69992020-05-04 13:46:34 -0300307static int evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,
308 struct perf_counts_values *count)
Jiri Olsa5a6ea812015-11-05 15:40:51 +0100309{
310 struct perf_sample_id *sid = SID(counter, cpu, thread);
311
312 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
313 process_synthesized_event, NULL);
314}
315
Jiri Olsa32dcd022019-07-21 13:23:51 +0200316static int read_single_counter(struct evsel *counter, int cpu,
Andi Kleenf0fbb112019-03-26 15:18:21 -0700317 int thread, struct timespec *rs)
318{
319 if (counter->tool_event == PERF_TOOL_DURATION_TIME) {
320 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
321 struct perf_counts_values *count =
322 perf_counts(counter->counts, cpu, thread);
323 count->ena = count->run = val;
324 count->val = val;
325 return 0;
326 }
Arnaldo Carvalho de Meloea089692020-04-30 11:00:53 -0300327 return evsel__read_counter(counter, cpu, thread);
Andi Kleenf0fbb112019-03-26 15:18:21 -0700328}
329
/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 *
 * Reads every thread's value for @counter on @cpu, records it to the
 * stat data file when in record mode, and dumps it when verbose > 1.
 * Returns 0 on success, -ENOENT for unsupported counters, -1 on error.
 */
static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
{
	int nthreads = perf_thread_map__nr(evsel_list->core.threads);
	int thread;

	if (!counter->supported)
		return -ENOENT;

	/* System-wide events have a single value per CPU, not per thread. */
	if (counter->core.system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		struct perf_counts_values *count;

		count = perf_counts(counter->counts, cpu, thread);

		/*
		 * The leader's group read loads data into its group members
		 * (via evsel__read_counter()) and sets their count->loaded.
		 */
		if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
		    read_single_counter(counter, cpu, thread, rs)) {
			/* Read failed: mark the counter unusable for this round. */
			counter->counts->scaled = -1;
			perf_counts(counter->counts, cpu, thread)->ena = 0;
			perf_counts(counter->counts, cpu, thread)->run = 0;
			return -1;
		}

		/* Consume the loaded flag so the next round re-reads. */
		perf_counts__set_loaded(counter->counts, cpu, thread, false);

		if (STAT_RECORD) {
			if (evsel__write_stat_event(counter, cpu, thread, count)) {
				pr_err("failed to write stat event\n");
				return -1;
			}
		}

		if (verbose > 1) {
			fprintf(stat_config.output,
				"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
					evsel__name(counter),
					cpu,
					count->val, count->ena, count->run);
		}
	}

	return 0;
}
382
/*
 * Read all counters CPU by CPU: pin the reading thread to each CPU once
 * and read every event that has an instance there, minimizing cross-CPU
 * IPIs.  Per-event failures are latched in counter->err rather than
 * aborting, so read_counters() can report them afterwards.
 * Returns 0 on success, -1 if affinity setup fails.
 */
static int read_affinity_counters(struct timespec *rs)
{
	struct evsel *counter;
	struct affinity affinity;
	int i, ncpus, cpu;

	if (affinity__setup(&affinity) < 0)
		return -1;

	ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus);
	/*
	 * Task / per-thread targets keep their counts in a single slot,
	 * so only the first iteration is needed.
	 */
	if (!target__has_cpu(&target) || target__has_per_thread(&target))
		ncpus = 1;
	evlist__for_each_cpu(evsel_list, i, cpu) {
		if (i >= ncpus)
			break;
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evsel_list, counter) {
			if (evsel__cpu_iter_skip(counter, cpu))
				continue;
			/* Don't retry an event that already failed this round. */
			if (!counter->err) {
				counter->err = read_counter_cpu(counter, rs,
								counter->cpu_iter - 1);
			}
		}
	}
	affinity__cleanup(&affinity);
	return 0;
}
412
Song Liufa853c42020-12-29 13:42:14 -0800413static int read_bpf_map_counters(void)
414{
415 struct evsel *counter;
416 int err;
417
418 evlist__for_each_entry(evsel_list, counter) {
419 err = bpf_counter__read(counter);
420 if (err)
421 return err;
422 }
423 return 0;
424}
425
Jin Yaoc7e5b322020-05-20 12:27:37 +0800426static void read_counters(struct timespec *rs)
427{
428 struct evsel *counter;
Song Liufa853c42020-12-29 13:42:14 -0800429 int err;
Jin Yaoc7e5b322020-05-20 12:27:37 +0800430
Song Liufa853c42020-12-29 13:42:14 -0800431 if (!stat_config.stop_read_counter) {
432 if (target__has_bpf(&target))
433 err = read_bpf_map_counters();
434 else
435 err = read_affinity_counters(rs);
436 if (err < 0)
437 return;
438 }
Jiri Olsa106a94a2015-06-26 11:29:19 +0200439
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -0300440 evlist__for_each_entry(evsel_list, counter) {
Andi Kleen4b49ab72019-11-20 16:15:20 -0800441 if (counter->err)
Andi Kleen245bad82015-09-01 15:52:46 -0700442 pr_debug("failed to read counter %s\n", counter->name);
Andi Kleen4b49ab72019-11-20 16:15:20 -0800443 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
Jiri Olsa3b3eb042015-06-26 11:29:20 +0200444 pr_warning("failed to process counter %s\n", counter->name);
Andi Kleen4b49ab72019-11-20 16:15:20 -0800445 counter->err = 0;
Jiri Olsa106a94a2015-06-26 11:29:19 +0200446 }
447}
448
Jin Yao72f02a92020-05-20 12:27:33 +0800449static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
450{
451 int i;
452
453 config->stats = calloc(nthreads, sizeof(struct runtime_stat));
454 if (!config->stats)
455 return -1;
456
457 config->stats_num = nthreads;
458
459 for (i = 0; i < nthreads; i++)
460 runtime_stat__init(&config->stats[i]);
461
462 return 0;
463}
464
465static void runtime_stat_delete(struct perf_stat_config *config)
466{
467 int i;
468
469 if (!config->stats)
470 return;
471
472 for (i = 0; i < config->stats_num; i++)
473 runtime_stat__exit(&config->stats[i]);
474
475 zfree(&config->stats);
476}
477
478static void runtime_stat_reset(struct perf_stat_config *config)
479{
480 int i;
481
482 if (!config->stats)
483 return;
484
485 for (i = 0; i < config->stats_num; i++)
486 perf_stat__reset_shadow_per_stat(&config->stats[i]);
487}
488
/*
 * Handle one --interval tick: compute the elapsed time since ref_time,
 * re-read all counters, append a stat-round record when recording, and
 * print the interval's counts.
 */
static void process_interval(void)
{
	struct timespec ts, rs;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	/* Shadow stats are per-interval: clear them before re-reading. */
	perf_stat__reset_shadow_per_stat(&rt_stat);
	runtime_stat_reset(&stat_config);
	read_counters(&rs);

	if (STAT_RECORD) {
		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
			pr_err("failed to write stat round event\n");
	}

	/* Wall time for this interval is the configured interval length (ms). */
	init_stats(&walltime_nsecs_stats);
	update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
	print_counters(&rs, 0, NULL);
}
509
Alexey Budankovdece3a42020-07-17 10:02:08 +0300510static bool handle_interval(unsigned int interval, int *times)
511{
512 if (interval) {
513 process_interval();
514 if (interval_count && !(--(*times)))
515 return true;
516 }
517 return false;
518}
519
/*
 * Enable all counters, honoring --delay (stat_config.initial_delay):
 *  - delay < 0: leave everything disabled (NOTE(review): presumably
 *    enabled later via the control fd — confirm against cmd handling)
 *  - delay > 0: announce disabled state, sleep, then enable
 * BPF-backed counters are enabled through bpf_counter__enable() first.
 * Returns 0 on success or the bpf_counter__enable() error code.
 */
static int enable_counters(void)
{
	struct evsel *evsel;
	int err;

	if (target__has_bpf(&target)) {
		evlist__for_each_entry(evsel_list, evsel) {
			err = bpf_counter__enable(evsel);
			if (err)
				return err;
		}
	}

	if (stat_config.initial_delay < 0) {
		pr_info(EVLIST_DISABLED_MSG);
		return 0;
	}

	if (stat_config.initial_delay > 0) {
		pr_info(EVLIST_DISABLED_MSG);
		usleep(stat_config.initial_delay * USEC_PER_MSEC);
	}

	/*
	 * We need to enable counters only if:
	 * - we don't have tracee (attaching to task or cpu)
	 * - we have initial delay configured
	 */
	if (!target__none(&target) || stat_config.initial_delay) {
		evlist__enable(evsel_list);
		if (stat_config.initial_delay > 0)
			pr_info(EVLIST_ENABLED_MSG);
	}
	return 0;
}
555
Mark Rutland3df33ef2016-08-09 14:04:29 +0100556static void disable_counters(void)
557{
558 /*
559 * If we don't have tracee (attaching to task or cpu), counters may
560 * still be running. To get accurate group ratios, we must stop groups
561 * from counting before reading their constituent counters.
562 */
563 if (!target__none(&target))
Jiri Olsae74676d2019-07-21 13:24:09 +0200564 evlist__disable(evsel_list);
Mark Rutland3df33ef2016-08-09 14:04:29 +0100565}
566
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -0300567static volatile int workload_exec_errno;
Arnaldo Carvalho de Melo6af206f2013-12-28 15:45:08 -0300568
569/*
Arnaldo Carvalho de Melo7b392ef2020-11-30 09:26:54 -0300570 * evlist__prepare_workload will send a SIGUSR1
Arnaldo Carvalho de Melo6af206f2013-12-28 15:45:08 -0300571 * if the fork fails, since we asked by setting its
572 * want_signal to true.
573 */
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -0300574static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
575 void *ucontext __maybe_unused)
Arnaldo Carvalho de Melo6af206f2013-12-28 15:45:08 -0300576{
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -0300577 workload_exec_errno = info->si_value.sival_int;
Arnaldo Carvalho de Melo6af206f2013-12-28 15:45:08 -0300578}
579
Arnaldo Carvalho de Meloddc69992020-05-04 13:46:34 -0300580static bool evsel__should_store_id(struct evsel *counter)
Jiri Olsa82bf3112017-07-26 14:02:06 +0200581{
Jiri Olsa1fc632c2019-07-21 13:24:29 +0200582 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
Jiri Olsa82bf3112017-07-26 14:02:06 +0200583}
584
Jiri Olsacbb5df7e2018-10-22 11:30:15 +0200585static bool is_target_alive(struct target *_target,
Jiri Olsa9749b902019-07-21 13:23:50 +0200586 struct perf_thread_map *threads)
Jiri Olsacbb5df7e2018-10-22 11:30:15 +0200587{
588 struct stat st;
589 int i;
590
591 if (!target__has_task(_target))
592 return true;
593
594 for (i = 0; i < threads->nr; i++) {
595 char path[PATH_MAX];
596
597 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
598 threads->map[i].pid);
599
600 if (!stat(path, &st))
601 return true;
602 }
603
604 return false;
605}
606
/*
 * Service a pending command on the control fd.  For enable/disable the
 * toggling itself happens inside evlist__ctlfd_process(); here we only
 * print an extra interval sample so the state change is visible in the
 * interval output.
 */
static void process_evlist(struct evlist *evlist, unsigned int interval)
{
	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;

	if (evlist__ctlfd_process(evlist, &cmd) > 0) {
		switch (cmd) {
		case EVLIST_CTL_CMD_ENABLE:
			if (interval)
				process_interval();
			break;
		case EVLIST_CTL_CMD_DISABLE:
			if (interval)
				process_interval();
			break;
		case EVLIST_CTL_CMD_SNAPSHOT:
		case EVLIST_CTL_CMD_ACK:
		case EVLIST_CTL_CMD_UNSUPPORTED:
		case EVLIST_CTL_CMD_EVLIST:
		case EVLIST_CTL_CMD_STOP:
		default:
			/* Nothing to do here for the remaining commands. */
			break;
		}
	}
}
631
632static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
633 int *time_to_sleep)
634{
635 int tts = *time_to_sleep;
636 struct timespec time_diff;
637
638 diff_timespec(&time_diff, time_stop, time_start);
639
640 tts -= time_diff.tv_sec * MSEC_PER_SEC +
641 time_diff.tv_nsec / NSEC_PER_MSEC;
642
643 if (tts < 0)
644 tts = 0;
645
646 *time_to_sleep = tts;
647}
648
/*
 * Main wait loop: poll the evlist fds until the forked workload exits
 * (or the attached target dies), handling interval prints, --timeout
 * and control-fd commands along the way.  Returns the child's waitpid
 * status (0 when not forking).
 */
static int dispatch_events(bool forks, int timeout, int interval, int *times)
{
	int child_exited = 0, status = 0;
	int time_to_sleep, sleep_time;
	struct timespec time_start, time_stop;

	/* Poll granularity: interval or timeout when set, else 1000 ms. */
	if (interval)
		sleep_time = interval;
	else if (timeout)
		sleep_time = timeout;
	else
		sleep_time = 1000;

	time_to_sleep = sleep_time;

	while (!done) {
		if (forks)
			child_exited = waitpid(child_pid, &status, WNOHANG);
		else
			child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;

		if (child_exited)
			break;

		clock_gettime(CLOCK_MONOTONIC, &time_start);
		if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
			if (timeout || handle_interval(interval, times))
				break;
			/* Full tick elapsed: restart the sleep budget. */
			time_to_sleep = sleep_time;
		} else { /* fd revent */
			process_evlist(evsel_list, interval);
			clock_gettime(CLOCK_MONOTONIC, &time_stop);
			/* Woken early: only sleep the remainder of the tick. */
			compute_tts(&time_start, &time_stop, &time_to_sleep);
		}
	}

	return status;
}
687
/* How the caller should react to a failed counter open. */
enum counter_recovery {
	COUNTER_SKIP,	/* event unsupported: drop it and continue */
	COUNTER_RETRY,	/* something was adjusted: try opening again */
	COUNTER_FATAL,	/* unrecoverable: abort the run */
};

/*
 * Classify an evsel open failure (errno) and decide how to recover.
 * May mutate the counter (supported/errored flags) or shrink the
 * thread map for --per-thread error threads.
 */
static enum counter_recovery stat_handle_error(struct evsel *counter)
{
	char msg[BUFSIZ];
	/*
	 * PPC returns ENXIO for HW counters until 2.6.37
	 * (behavior changed with commit b0a873e).
	 */
	if (errno == EINVAL || errno == ENOSYS ||
	    errno == ENOENT || errno == EOPNOTSUPP ||
	    errno == ENXIO) {
		if (verbose > 0)
			ui__warning("%s event is not supported by the kernel.\n",
				    evsel__name(counter));
		counter->supported = false;
		/*
		 * errored is a sticky flag that means one of the counter's
		 * cpu event had a problem and needs to be reexamined.
		 */
		counter->errored = true;

		/* Standalone events can be skipped; group leaders cannot. */
		if ((counter->leader != counter) ||
		    !(counter->leader->core.nr_members > 1))
			return COUNTER_SKIP;
	} else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
		/* A softer event config was substituted: retry the open. */
		if (verbose > 0)
			ui__warning("%s\n", msg);
		return COUNTER_RETRY;
	} else if (target__has_per_thread(&target) &&
		   evsel_list->core.threads &&
		   evsel_list->core.threads->err_thread != -1) {
		/*
		 * For global --per-thread case, skip current
		 * error thread.
		 */
		if (!thread_map__remove(evsel_list->core.threads,
					evsel_list->core.threads->err_thread)) {
			evsel_list->core.threads->err_thread = -1;
			return COUNTER_RETRY;
		}
	}

	evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
	ui__error("%s\n", msg);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);
	return COUNTER_FATAL;
}
742
/*
 * Run one complete measurement: open all counters (with weak-group
 * fallback handling), optionally fork+exec the workload, wait until it
 * finishes or the interval/timeout machinery stops, then read every
 * counter back.  Returns the workload's exit status, or a negative
 * value on failure.
 */
static int __run_perf_stat(int argc, const char **argv, int run_idx)
{
	int interval = stat_config.interval;
	int times = stat_config.times;
	int timeout = stat_config.timeout;
	char msg[BUFSIZ];
	unsigned long long t0, t1;
	struct evsel *counter;
	size_t l;
	int status = 0;
	const bool forks = (argc > 0);	/* a workload command was given */
	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
	struct affinity affinity;
	int i, cpu, err;
	bool second_pass = false;	/* weak groups need a reopen pass */

	if (forks) {
		/*
		 * Fork the workload now; evlist__start_workload() below
		 * releases it only after the counters are created.
		 */
		if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (group)
		evlist__set_leader(evsel_list);

	if (affinity__setup(&affinity) < 0)
		return -1;

	/* With a BPF target (-b), load the BPF counters before opening. */
	if (target__has_bpf(&target)) {
		evlist__for_each_entry(evsel_list, counter) {
			if (bpf_counter__load(counter, &target))
				return -1;
		}
	}

	/*
	 * First open pass, CPU-major so the thread affinity only has to be
	 * switched once per CPU.
	 */
	evlist__for_each_cpu (evsel_list, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evsel_list, counter) {
			if (evsel__cpu_iter_skip(counter, cpu))
				continue;
			if (counter->reset_group || counter->errored)
				continue;
try_again:
			if (create_perf_stat_counter(counter, &stat_config, &target,
						     counter->cpu_iter - 1) < 0) {

				/*
				 * Weak group failed. We cannot just undo this here
				 * because earlier CPUs might be in group mode, and the kernel
				 * doesn't support mixing group and non group reads. Defer
				 * it to later.
				 * Don't close here because we're in the wrong affinity.
				 */
				if ((errno == EINVAL || errno == EBADF) &&
				    counter->leader != counter &&
				    counter->weak_group) {
					evlist__reset_weak_group(evsel_list, counter, false);
					assert(counter->reset_group);
					second_pass = true;
					continue;
				}

				switch (stat_handle_error(counter)) {
				case COUNTER_FATAL:
					return -1;
				case COUNTER_RETRY:
					goto try_again;
				case COUNTER_SKIP:
					continue;
				default:
					break;
				}

			}
			counter->supported = true;
		}
	}

	if (second_pass) {
		/*
		 * Now redo all the weak group after closing them,
		 * and also close errored counters.
		 */

		evlist__for_each_cpu(evsel_list, i, cpu) {
			affinity__set(&affinity, cpu);
			/* First close errored or weak retry */
			evlist__for_each_entry(evsel_list, counter) {
				if (!counter->reset_group && !counter->errored)
					continue;
				if (evsel__cpu_iter_skip_no_inc(counter, cpu))
					continue;
				perf_evsel__close_cpu(&counter->core, counter->cpu_iter);
			}
			/* Now reopen weak */
			evlist__for_each_entry(evsel_list, counter) {
				if (!counter->reset_group && !counter->errored)
					continue;
				if (evsel__cpu_iter_skip(counter, cpu))
					continue;
				/* errored-only counters stay closed */
				if (!counter->reset_group)
					continue;
try_again_reset:
				pr_debug2("reopening weak %s\n", evsel__name(counter));
				if (create_perf_stat_counter(counter, &stat_config, &target,
							     counter->cpu_iter - 1) < 0) {

					switch (stat_handle_error(counter)) {
					case COUNTER_FATAL:
						return -1;
					case COUNTER_RETRY:
						goto try_again_reset;
					case COUNTER_SKIP:
						continue;
					default:
						break;
					}
				}
				counter->supported = true;
			}
		}
	}
	affinity__cleanup(&affinity);

	/*
	 * Drop the fds of unsupported counters, track the widest unit
	 * string for aligned output, and store event IDs when recording.
	 */
	evlist__for_each_entry(evsel_list, counter) {
		if (!counter->supported) {
			perf_evsel__free_fd(&counter->core);
			continue;
		}

		l = strlen(counter->unit);
		if (l > stat_config.unit_width)
			stat_config.unit_width = l;

		if (evsel__should_store_id(counter) &&
		    evsel__store_ids(counter, evsel_list))
			return -1;
	}

	if (evlist__apply_filters(evsel_list, &counter)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			counter->filter, evsel__name(counter), errno,
			str_error_r(errno, msg, sizeof(msg)));
		return -1;
	}

	/* 'perf stat record': emit the file/pipe header and synth events. */
	if (STAT_RECORD) {
		int fd = perf_data__fd(&perf_stat.data);

		if (is_pipe) {
			err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
		} else {
			err = perf_session__write_header(perf_stat.session, evsel_list,
							 fd, false);
		}

		if (err < 0)
			return err;

		err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
							 process_synthesized_event, is_pipe);
		if (err < 0)
			return err;
	}

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		evlist__start_workload(evsel_list);
		err = enable_counters();
		if (err)
			return -1;

		if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
			status = dispatch_events(forks, timeout, interval, &times);
		if (child_pid != -1) {
			/* --timeout expired: terminate the workload ourselves */
			if (timeout)
				kill(child_pid, SIGTERM);
			wait4(child_pid, &status, 0, &stat_config.ru_data);
		}

		if (workload_exec_errno) {
			const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
			pr_err("Workload failed: %s\n", emsg);
			return -1;
		}

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		err = enable_counters();
		if (err)
			return -1;
		status = dispatch_events(forks, timeout, interval, &times);
	}

	disable_counters();

	t1 = rdclock();

	if (stat_config.walltime_run_table)
		stat_config.walltime_run[run_idx] = t1 - t0;

	if (interval && stat_config.summary) {
		/*
		 * --summary with interval mode: stop interval processing and
		 * reset the stats so the final read produces a clean summary.
		 */
		stat_config.interval = 0;
		stat_config.stop_read_counter = true;
		init_stats(&walltime_nsecs_stats);
		update_stats(&walltime_nsecs_stats, t1 - t0);

		if (stat_config.aggr_mode == AGGR_GLOBAL)
			evlist__save_aggr_prev_raw_counts(evsel_list);

		evlist__copy_prev_raw_counts(evsel_list);
		evlist__reset_prev_raw_counts(evsel_list);
		runtime_stat_reset(&stat_config);
		perf_stat__reset_shadow_per_stat(&rt_stat);
	} else
		update_stats(&walltime_nsecs_stats, t1 - t0);

	/*
	 * Closing a group leader splits the group, and as we only disable
	 * group leaders, results in remaining events becoming enabled. To
	 * avoid arbitrary skew, we must read all counters before closing any
	 * group leaders.
	 */
	read_counters(&(struct timespec) { .tv_nsec = t1-t0 });

	/*
	 * We need to keep evsel_list alive, because it's processed
	 * later the evsel_list will be closed after.
	 */
	if (!STAT_RECORD)
		evlist__close(evsel_list);

	return WEXITSTATUS(status);
}
986
Jiri Olsae55c14a2018-04-23 11:08:21 +0200987static int run_perf_stat(int argc, const char **argv, int run_idx)
Peter Zijlstra1f16c572012-10-23 13:40:14 +0200988{
989 int ret;
990
991 if (pre_cmd) {
992 ret = system(pre_cmd);
993 if (ret)
994 return ret;
995 }
996
997 if (sync_run)
998 sync();
999
Jiri Olsae55c14a2018-04-23 11:08:21 +02001000 ret = __run_perf_stat(argc, argv, run_idx);
Peter Zijlstra1f16c572012-10-23 13:40:14 +02001001 if (ret)
1002 return ret;
1003
1004 if (post_cmd) {
1005 ret = system(post_cmd);
1006 if (ret)
1007 return ret;
1008 }
1009
1010 return ret;
1011}
1012
Jiri Olsaa5a9eac2018-08-30 08:32:24 +02001013static void print_counters(struct timespec *ts, int argc, const char **argv)
1014{
Jiri Olsa01748202018-08-30 08:32:25 +02001015 /* Do not print anything if we record to the pipe. */
1016 if (STAT_RECORD && perf_stat.data.is_pipe)
1017 return;
Andi Kleen55a4de92020-10-26 17:27:36 -07001018 if (stat_config.quiet)
1019 return;
Jiri Olsa01748202018-08-30 08:32:25 +02001020
Arnaldo Carvalho de Melo71273722020-11-30 14:55:12 -03001021 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
Jiri Olsaa5a9eac2018-08-30 08:32:24 +02001022}
1023
Peter Zijlstraf7b7c262009-06-10 15:55:59 +02001024static volatile int signr = -1;
1025
/*
 * Signal handler: request shutdown of the measurement loop.  In interval
 * mode (or when there is no child) the main loop is told to stop via
 * 'done'; the signal number is remembered so sig_atexit() can re-raise it.
 */
static void skip_signal(int signo)
{
	if ((child_pid == -1) || stat_config.interval)
		done = 1;

	signr = signo;
	/*
	 * render child_pid harmless
	 * won't send SIGTERM to a random
	 * process in case of race condition
	 * and fast PID recycling
	 */
	child_pid = -1;
}
1040
/*
 * Exit hook: terminate a still-running child workload and, if we are
 * exiting because of a signal, re-raise it with the default disposition
 * so our own exit status reflects the real cause of death.
 */
static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * avoid race condition with SIGCHLD handler
	 * in skip_signal() which is modifying child_pid
	 * goal is to avoid send SIGTERM to a random
	 * process
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	if (signr == -1)
		return;

	/* re-raise the fatal signal with its default action */
	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
Ingo Molnarddcacfa2009-04-20 15:37:32 +02001066
Paul A. Clarked778a772020-05-20 11:23:35 -05001067void perf_stat__set_big_num(int set)
1068{
1069 stat_config.big_num = (set != 0);
1070}
1071
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001072static int stat__set_big_num(const struct option *opt __maybe_unused,
1073 const char *s __maybe_unused, int unset)
Stephane Eraniand7470b62010-12-01 18:49:05 +02001074{
1075 big_num_opt = unset ? 0 : 1;
Paul A. Clarked778a772020-05-20 11:23:35 -05001076 perf_stat__set_big_num(!unset);
Stephane Eraniand7470b62010-12-01 18:49:05 +02001077 return 0;
1078}
1079
Andi Kleen44b1e602016-05-30 12:49:42 -03001080static int enable_metric_only(const struct option *opt __maybe_unused,
1081 const char *s __maybe_unused, int unset)
1082{
1083 force_metric_only = true;
Jiri Olsa0ce5aa02018-08-30 08:32:31 +02001084 stat_config.metric_only = !unset;
Andi Kleen44b1e602016-05-30 12:49:42 -03001085 return 0;
1086}
1087
Andi Kleenb18f3e32017-08-31 12:40:31 -07001088static int parse_metric_groups(const struct option *opt,
1089 const char *str,
1090 int unset __maybe_unused)
1091{
Ian Rogers05530a72020-05-20 11:20:10 -07001092 return metricgroup__parse_groups(opt, str,
1093 stat_config.metric_no_group,
1094 stat_config.metric_no_merge,
1095 &stat_config.metric_events);
Andi Kleenb18f3e32017-08-31 12:40:31 -07001096}
1097
Alexey Budankov27e97692020-07-17 10:05:41 +03001098static int parse_control_option(const struct option *opt,
1099 const char *str,
1100 int unset __maybe_unused)
1101{
Adrian Hunter9864a662020-09-01 12:37:53 +03001102 struct perf_stat_config *config = opt->value;
Alexey Budankov27e97692020-07-17 10:05:41 +03001103
Adrian Huntera8fcbd22020-09-02 13:57:07 +03001104 return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close);
1105}
1106
Namhyung Kimd1c5a0e2020-09-24 21:44:52 +09001107static int parse_stat_cgroups(const struct option *opt,
1108 const char *str, int unset)
1109{
1110 if (stat_config.cgroup_list) {
1111 pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
1112 return -1;
1113 }
1114
1115 return parse_cgroups(opt, str, unset);
1116}
1117
/* Command line options for 'perf stat', consumed by parse_options(). */
static struct option stat_options[] = {
	OPT_BOOLEAN('T', "transaction", &transaction_run,
		    "hardware transaction statistics"),
	/* event selection and filtering */
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
		    "child tasks do not inherit counters"),
	/* measurement target: pid/tid/bpf-prog/system-wide */
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
#ifdef HAVE_BPF_SKEL
	OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
		   "stat events on existing bpf program id"),
#endif
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN(0, "scale", &stat_config.scale,
		    "Use --no-scale to disable counter scaling for multiplexing"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &stat_config.run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
		    "display details about each run (only with -r option)"),
	OPT_BOOLEAN('n', "null", &stat_config.null_run,
		    "null run - dont start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		 "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands\' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	/* aggregation modes; later entries override earlier ones */
	OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
	OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_stat_cgroups),
	OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
		    "expand events for each cgroup"),
	/* output destination */
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
		   "command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
		   "command to run after to the measured command"),
	/* interval/timeout control */
	OPT_UINTEGER('I', "interval-print", &stat_config.interval,
		    "print counts at regular interval in ms "
		    "(overhead is possible for values <= 100ms)"),
	OPT_INTEGER(0, "interval-count", &stat_config.times,
		    "print counts for fixed number of times"),
	OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
		    "clear screen in between new interval"),
	OPT_UINTEGER(0, "timeout", &stat_config.timeout,
		    "stop workload and print counts after a timeout period in ms (>= 10ms)"),
	OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
		     "aggregate counts per processor die", AGGR_DIE),
	OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
		     "aggregate counts per thread", AGGR_THREAD),
	OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
		     "aggregate counts per numa node", AGGR_NODE),
	OPT_INTEGER('D', "delay", &stat_config.initial_delay,
		    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
	/* metric handling */
	OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
			"Only print computed metrics. No raw values", enable_metric_only),
	OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
		       "don't group metric events, impacts multiplexing"),
	OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
		       "don't try to share events between metrics in a group"),
	OPT_BOOLEAN(0, "topdown", &topdown_run,
			"measure topdown level 1 statistics"),
	OPT_BOOLEAN(0, "smi-cost", &smi_cost,
			"measure SMI cost"),
	OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
		     "monitor specified metrics or metric groups (separated by ,)",
		     parse_metric_groups),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
		    "Use with 'percore' event qualifier to show the event "
		    "counts of one hardware thread by sum up total hardware "
		    "threads of same physical core"),
	OPT_BOOLEAN(0, "summary", &stat_config.summary,
		       "print summary for interval mode"),
	OPT_BOOLEAN(0, "quiet", &stat_config.quiet,
			"don't print output (useful with record)"),
#ifdef HAVE_LIBPFM
	OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
		"libpfm4 event selector. use 'perf list' to list available events",
		parse_libpfm_events_option),
#endif
	OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
		     "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
		     "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
		     "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
		      parse_control_option),
	OPT_END()
};
1236
James Clark2760f5a12020-11-26 16:13:20 +02001237static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
Jiri Olsaf8548392019-07-21 13:23:49 +02001238 struct perf_cpu_map *map, int cpu)
Jiri Olsa1fe7a302015-10-16 12:41:15 +02001239{
1240 return cpu_map__get_socket(map, cpu, NULL);
1241}
1242
James Clark2760f5a12020-11-26 16:13:20 +02001243static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
Jiri Olsaf8548392019-07-21 13:23:49 +02001244 struct perf_cpu_map *map, int cpu)
Kan Liangdb5742b2019-06-04 15:50:42 -07001245{
1246 return cpu_map__get_die(map, cpu, NULL);
1247}
1248
James Clark2760f5a12020-11-26 16:13:20 +02001249static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
Jiri Olsaf8548392019-07-21 13:23:49 +02001250 struct perf_cpu_map *map, int cpu)
Jiri Olsa1fe7a302015-10-16 12:41:15 +02001251{
1252 return cpu_map__get_core(map, cpu, NULL);
1253}
1254
James Clark2760f5a12020-11-26 16:13:20 +02001255static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
Jiri Olsa86895b42019-08-28 10:17:43 +02001256 struct perf_cpu_map *map, int cpu)
1257{
1258 return cpu_map__get_node(map, cpu, NULL);
1259}
1260
James Clark2760f5a12020-11-26 16:13:20 +02001261static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
Jiri Olsaf8548392019-07-21 13:23:49 +02001262 aggr_get_id_t get_id, struct perf_cpu_map *map, int idx)
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001263{
1264 int cpu;
James Clark2760f5a12020-11-26 16:13:20 +02001265 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001266
1267 if (idx >= map->nr)
James Clark2760f5a12020-11-26 16:13:20 +02001268 return id;
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001269
1270 cpu = map->map[idx];
1271
James Clarkff523292020-11-26 16:13:23 +02001272 if (cpu_map__aggr_cpu_id_is_empty(config->cpus_aggr_map->map[cpu]))
1273 config->cpus_aggr_map->map[cpu] = get_id(config, map, idx);
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001274
James Clarkff523292020-11-26 16:13:23 +02001275 id = config->cpus_aggr_map->map[cpu];
James Clark2760f5a12020-11-26 16:13:20 +02001276 return id;
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001277}
1278
James Clark2760f5a12020-11-26 16:13:20 +02001279static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
Jiri Olsaf8548392019-07-21 13:23:49 +02001280 struct perf_cpu_map *map, int idx)
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001281{
Jiri Olsa6f6b6592018-08-30 08:32:45 +02001282 return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001283}
1284
James Clark2760f5a12020-11-26 16:13:20 +02001285static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
Jiri Olsaf8548392019-07-21 13:23:49 +02001286 struct perf_cpu_map *map, int idx)
Kan Liangdb5742b2019-06-04 15:50:42 -07001287{
1288 return perf_stat__get_aggr(config, perf_stat__get_die, map, idx);
1289}
1290
James Clark2760f5a12020-11-26 16:13:20 +02001291static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
Jiri Olsaf8548392019-07-21 13:23:49 +02001292 struct perf_cpu_map *map, int idx)
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001293{
Jiri Olsa6f6b6592018-08-30 08:32:45 +02001294 return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001295}
1296
James Clark2760f5a12020-11-26 16:13:20 +02001297static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
Jiri Olsa86895b42019-08-28 10:17:43 +02001298 struct perf_cpu_map *map, int idx)
1299{
1300 return perf_stat__get_aggr(config, perf_stat__get_node, map, idx);
1301}
1302
Jin Yao4fc4d8d2019-04-12 21:59:49 +08001303static bool term_percore_set(void)
1304{
Jiri Olsa32dcd022019-07-21 13:23:51 +02001305 struct evsel *counter;
Jin Yao4fc4d8d2019-04-12 21:59:49 +08001306
1307 evlist__for_each_entry(evsel_list, counter) {
1308 if (counter->percore)
1309 return true;
1310 }
1311
1312 return false;
1313}
1314
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001315static int perf_stat_init_aggr_mode(void)
1316{
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001317 int nr;
1318
Jiri Olsa421a50f2015-07-21 14:31:22 +02001319 switch (stat_config.aggr_mode) {
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001320 case AGGR_SOCKET:
Jiri Olsaf72f9012019-07-21 13:24:41 +02001321 if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001322 perror("cannot build socket map");
1323 return -1;
1324 }
Jiri Olsa6f6b6592018-08-30 08:32:45 +02001325 stat_config.aggr_get_id = perf_stat__get_socket_cached;
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001326 break;
Kan Liangdb5742b2019-06-04 15:50:42 -07001327 case AGGR_DIE:
Jiri Olsaf72f9012019-07-21 13:24:41 +02001328 if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
Kan Liangdb5742b2019-06-04 15:50:42 -07001329 perror("cannot build die map");
1330 return -1;
1331 }
1332 stat_config.aggr_get_id = perf_stat__get_die_cached;
1333 break;
Stephane Eranian12c08a92013-02-14 13:57:29 +01001334 case AGGR_CORE:
Jiri Olsaf72f9012019-07-21 13:24:41 +02001335 if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
Stephane Eranian12c08a92013-02-14 13:57:29 +01001336 perror("cannot build core map");
1337 return -1;
1338 }
Jiri Olsa6f6b6592018-08-30 08:32:45 +02001339 stat_config.aggr_get_id = perf_stat__get_core_cached;
Stephane Eranian12c08a92013-02-14 13:57:29 +01001340 break;
Jiri Olsa86895b42019-08-28 10:17:43 +02001341 case AGGR_NODE:
1342 if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
1343 perror("cannot build core map");
1344 return -1;
1345 }
1346 stat_config.aggr_get_id = perf_stat__get_node_cached;
1347 break;
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001348 case AGGR_NONE:
Jin Yao4fc4d8d2019-04-12 21:59:49 +08001349 if (term_percore_set()) {
Jiri Olsaf72f9012019-07-21 13:24:41 +02001350 if (cpu_map__build_core_map(evsel_list->core.cpus,
Jin Yao4fc4d8d2019-04-12 21:59:49 +08001351 &stat_config.aggr_map)) {
1352 perror("cannot build core map");
1353 return -1;
1354 }
1355 stat_config.aggr_get_id = perf_stat__get_core_cached;
1356 }
1357 break;
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001358 case AGGR_GLOBAL:
Jiri Olsa32b8af82015-06-26 11:29:27 +02001359 case AGGR_THREAD:
Jiri Olsa208df992015-10-16 12:41:04 +02001360 case AGGR_UNSET:
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001361 default:
1362 break;
1363 }
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001364
1365 /*
1366 * The evsel_list->cpus is the base we operate on,
1367 * taking the highest cpu number to be the size of
1368 * the aggregation translate cpumap.
1369 */
Jiri Olsa4256d432019-09-02 14:12:53 +02001370 nr = perf_cpu_map__max(evsel_list->core.cpus);
James Clarkd526e1a2020-11-26 16:13:22 +02001371 stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
Jiri Olsa6f6b6592018-08-30 08:32:45 +02001372 return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001373}
1374
James Clarkd526e1a2020-11-26 16:13:22 +02001375static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
1376{
1377 if (map) {
1378 WARN_ONCE(refcount_read(&map->refcnt) != 0,
1379 "cpu_aggr_map refcnt unbalanced\n");
1380 free(map);
1381 }
1382}
1383
1384static void cpu_aggr_map__put(struct cpu_aggr_map *map)
1385{
1386 if (map && refcount_dec_and_test(&map->refcnt))
1387 cpu_aggr_map__delete(map);
1388}
1389
Masami Hiramatsu544c2ae2015-12-09 11:11:27 +09001390static void perf_stat__exit_aggr_mode(void)
1391{
James Clarkd526e1a2020-11-26 16:13:22 +02001392 cpu_aggr_map__put(stat_config.aggr_map);
1393 cpu_aggr_map__put(stat_config.cpus_aggr_map);
Jiri Olsa6f6b6592018-08-30 08:32:45 +02001394 stat_config.aggr_map = NULL;
1395 stat_config.cpus_aggr_map = NULL;
Masami Hiramatsu544c2ae2015-12-09 11:11:27 +09001396}
1397
Jiri Olsaf8548392019-07-21 13:23:49 +02001398static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx)
Jiri Olsa68d702f2015-11-05 15:40:58 +01001399{
1400 int cpu;
1401
1402 if (idx > map->nr)
1403 return -1;
1404
1405 cpu = map->map[idx];
1406
Jan Stancekda8a58b2017-02-17 12:10:26 +01001407 if (cpu >= env->nr_cpus_avail)
Jiri Olsa68d702f2015-11-05 15:40:58 +01001408 return -1;
1409
1410 return cpu;
1411}
1412
James Clark2760f5a12020-11-26 16:13:20 +02001413static struct aggr_cpu_id perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data)
Jiri Olsa68d702f2015-11-05 15:40:58 +01001414{
1415 struct perf_env *env = data;
1416 int cpu = perf_env__get_cpu(env, map, idx);
James Clark2760f5a12020-11-26 16:13:20 +02001417 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();
Jiri Olsa68d702f2015-11-05 15:40:58 +01001418
James Clark2760f5a12020-11-26 16:13:20 +02001419 if (cpu != -1)
James Clark1a270cb2020-11-26 16:13:25 +02001420 id.socket = env->cpu[cpu].socket_id;
James Clark2760f5a12020-11-26 16:13:20 +02001421
1422 return id;
Jiri Olsa68d702f2015-11-05 15:40:58 +01001423}
1424
James Clark2760f5a12020-11-26 16:13:20 +02001425static struct aggr_cpu_id perf_env__get_die(struct perf_cpu_map *map, int idx, void *data)
Kan Liangdb5742b2019-06-04 15:50:42 -07001426{
1427 struct perf_env *env = data;
James Clark2760f5a12020-11-26 16:13:20 +02001428 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();
1429 int cpu = perf_env__get_cpu(env, map, idx);
Kan Liangdb5742b2019-06-04 15:50:42 -07001430
1431 if (cpu != -1) {
1432 /*
James Clark1a270cb2020-11-26 16:13:25 +02001433 * die_id is relative to socket, so start
1434 * with the socket ID and then add die to
1435 * make a unique ID.
Kan Liangdb5742b2019-06-04 15:50:42 -07001436 */
James Clark1a270cb2020-11-26 16:13:25 +02001437 id.socket = env->cpu[cpu].socket_id;
James Clarkba2ee162020-11-26 16:13:26 +02001438 id.die = env->cpu[cpu].die_id;
Kan Liangdb5742b2019-06-04 15:50:42 -07001439 }
1440
James Clark2760f5a12020-11-26 16:13:20 +02001441 return id;
Kan Liangdb5742b2019-06-04 15:50:42 -07001442}
1443
James Clark2760f5a12020-11-26 16:13:20 +02001444static struct aggr_cpu_id perf_env__get_core(struct perf_cpu_map *map, int idx, void *data)
Jiri Olsa68d702f2015-11-05 15:40:58 +01001445{
1446 struct perf_env *env = data;
James Clark2760f5a12020-11-26 16:13:20 +02001447 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();
1448 int cpu = perf_env__get_cpu(env, map, idx);
Jiri Olsa68d702f2015-11-05 15:40:58 +01001449
1450 if (cpu != -1) {
Jiri Olsa68d702f2015-11-05 15:40:58 +01001451 /*
Kan Liangdb5742b2019-06-04 15:50:42 -07001452 * core_id is relative to socket and die,
James Clarkb9933812020-11-26 16:13:27 +02001453 * we need a global id. So we set
1454 * socket, die id and core id
Jiri Olsa68d702f2015-11-05 15:40:58 +01001455 */
James Clark1a270cb2020-11-26 16:13:25 +02001456 id.socket = env->cpu[cpu].socket_id;
James Clarkba2ee162020-11-26 16:13:26 +02001457 id.die = env->cpu[cpu].die_id;
James Clarkb9933812020-11-26 16:13:27 +02001458 id.core = env->cpu[cpu].core_id;
Jiri Olsa68d702f2015-11-05 15:40:58 +01001459 }
1460
James Clark2760f5a12020-11-26 16:13:20 +02001461 return id;
Jiri Olsa68d702f2015-11-05 15:40:58 +01001462}
1463
James Clark2760f5a12020-11-26 16:13:20 +02001464static struct aggr_cpu_id perf_env__get_node(struct perf_cpu_map *map, int idx, void *data)
Jiri Olsa86895b42019-08-28 10:17:43 +02001465{
1466 int cpu = perf_env__get_cpu(data, map, idx);
James Clark2760f5a12020-11-26 16:13:20 +02001467 struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();
Jiri Olsa86895b42019-08-28 10:17:43 +02001468
James Clarkfcd83a32020-11-26 16:13:24 +02001469 id.node = perf_env__numa_node(data, cpu);
James Clark2760f5a12020-11-26 16:13:20 +02001470 return id;
Jiri Olsa86895b42019-08-28 10:17:43 +02001471}
1472
Jiri Olsaf8548392019-07-21 13:23:49 +02001473static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus,
James Clarkd526e1a2020-11-26 16:13:22 +02001474 struct cpu_aggr_map **sockp)
Jiri Olsa68d702f2015-11-05 15:40:58 +01001475{
1476 return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
1477}
1478
Jiri Olsaf8548392019-07-21 13:23:49 +02001479static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus,
James Clarkd526e1a2020-11-26 16:13:22 +02001480 struct cpu_aggr_map **diep)
Kan Liangdb5742b2019-06-04 15:50:42 -07001481{
1482 return cpu_map__build_map(cpus, diep, perf_env__get_die, env);
1483}
1484
Jiri Olsaf8548392019-07-21 13:23:49 +02001485static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus,
James Clarkd526e1a2020-11-26 16:13:22 +02001486 struct cpu_aggr_map **corep)
Jiri Olsa68d702f2015-11-05 15:40:58 +01001487{
1488 return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
1489}
1490
Jiri Olsa86895b42019-08-28 10:17:43 +02001491static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus,
James Clarkd526e1a2020-11-26 16:13:22 +02001492 struct cpu_aggr_map **nodep)
Jiri Olsa86895b42019-08-28 10:17:43 +02001493{
1494 return cpu_map__build_map(cpus, nodep, perf_env__get_node, env);
1495}
1496
James Clark2760f5a12020-11-26 16:13:20 +02001497static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
Jiri Olsaf8548392019-07-21 13:23:49 +02001498 struct perf_cpu_map *map, int idx)
Jiri Olsa68d702f2015-11-05 15:40:58 +01001499{
1500 return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
1501}
James Clark2760f5a12020-11-26 16:13:20 +02001502static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
Jiri Olsaf8548392019-07-21 13:23:49 +02001503 struct perf_cpu_map *map, int idx)
Kan Liangdb5742b2019-06-04 15:50:42 -07001504{
1505 return perf_env__get_die(map, idx, &perf_stat.session->header.env);
1506}
Jiri Olsa68d702f2015-11-05 15:40:58 +01001507
James Clark2760f5a12020-11-26 16:13:20 +02001508static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
Jiri Olsaf8548392019-07-21 13:23:49 +02001509 struct perf_cpu_map *map, int idx)
Jiri Olsa68d702f2015-11-05 15:40:58 +01001510{
1511 return perf_env__get_core(map, idx, &perf_stat.session->header.env);
1512}
1513
James Clark2760f5a12020-11-26 16:13:20 +02001514static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
Jiri Olsa86895b42019-08-28 10:17:43 +02001515 struct perf_cpu_map *map, int idx)
1516{
1517 return perf_env__get_node(map, idx, &perf_stat.session->header.env);
1518}
1519
Jiri Olsa68d702f2015-11-05 15:40:58 +01001520static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
1521{
1522 struct perf_env *env = &st->session->header.env;
1523
1524 switch (stat_config.aggr_mode) {
1525 case AGGR_SOCKET:
Jiri Olsaf72f9012019-07-21 13:24:41 +02001526 if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
Jiri Olsa68d702f2015-11-05 15:40:58 +01001527 perror("cannot build socket map");
1528 return -1;
1529 }
Jiri Olsa6f6b6592018-08-30 08:32:45 +02001530 stat_config.aggr_get_id = perf_stat__get_socket_file;
Jiri Olsa68d702f2015-11-05 15:40:58 +01001531 break;
Kan Liangdb5742b2019-06-04 15:50:42 -07001532 case AGGR_DIE:
Jiri Olsaf72f9012019-07-21 13:24:41 +02001533 if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
Kan Liangdb5742b2019-06-04 15:50:42 -07001534 perror("cannot build die map");
1535 return -1;
1536 }
1537 stat_config.aggr_get_id = perf_stat__get_die_file;
1538 break;
Jiri Olsa68d702f2015-11-05 15:40:58 +01001539 case AGGR_CORE:
Jiri Olsaf72f9012019-07-21 13:24:41 +02001540 if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
Jiri Olsa68d702f2015-11-05 15:40:58 +01001541 perror("cannot build core map");
1542 return -1;
1543 }
Jiri Olsa6f6b6592018-08-30 08:32:45 +02001544 stat_config.aggr_get_id = perf_stat__get_core_file;
Jiri Olsa68d702f2015-11-05 15:40:58 +01001545 break;
Jiri Olsa86895b42019-08-28 10:17:43 +02001546 case AGGR_NODE:
1547 if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1548 perror("cannot build core map");
1549 return -1;
1550 }
1551 stat_config.aggr_get_id = perf_stat__get_node_file;
1552 break;
Jiri Olsa68d702f2015-11-05 15:40:58 +01001553 case AGGR_NONE:
1554 case AGGR_GLOBAL:
1555 case AGGR_THREAD:
1556 case AGGR_UNSET:
1557 default:
1558 break;
1559 }
1560
1561 return 0;
1562}
1563
/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	int err;
	/* Baseline set: software clock/switch/fault counters plus cycles. */
	struct perf_event_attr default_attrs0[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES	},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS		},
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS		},

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES		},
};
	/* Only added when the cpu PMU advertises the stalled-cycles events. */
	struct perf_event_attr frontend_attrs[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	},
};
	struct perf_event_attr backend_attrs[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND	},
};
	struct perf_event_attr default_attrs1[] = {
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS		},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS	},
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES		},

};

/*
 * Detailed stats (-d), covering the L1 and last level data caches:
 */
	struct perf_event_attr detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL			<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL			<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
};

/*
 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
 */
	struct perf_event_attr very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1I		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1I		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_DTLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_DTLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_ITLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_ITLB		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_READ		<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},

};

/*
 * Very, very detailed stats (-d -d -d), adding prefetch events:
 */
	struct perf_event_attr very_very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS	<< 16)				},

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D		<<  0  |
	(PERF_COUNT_HW_CACHE_OP_PREFETCH	<<  8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS	<< 16)				},
};
	struct parse_events_error errinfo;

	/* Set attrs if no event is selected and !null_run: */
	if (stat_config.null_run)
		return 0;

	bzero(&errinfo, sizeof(errinfo));
	if (transaction_run) {
		/* Handle -T as -M transaction. Once platform specific metrics
		 * support has been added to the json files, all archictures
		 * will use this approach. To determine transaction support
		 * on an architecture test for such a metric name.
		 */
		if (metricgroup__has_metric("transaction")) {
			struct option opt = { .value = &evsel_list };

			return metricgroup__parse_groups(&opt, "transaction",
							 stat_config.metric_no_group,
							 stat_config.metric_no_merge,
							 &stat_config.metric_events);
		}

		/* No json metric: fall back to the hard-coded attr strings. */
		if (pmu_have_event("cpu", "cycles-ct") &&
		    pmu_have_event("cpu", "el-start"))
			err = parse_events(evsel_list, transaction_attrs,
					   &errinfo);
		else
			err = parse_events(evsel_list,
					   transaction_limited_attrs,
					   &errinfo);
		if (err) {
			fprintf(stderr, "Cannot set up transaction events\n");
			parse_events_print_error(&errinfo, transaction_attrs);
			return -1;
		}
		return 0;
	}

	if (smi_cost) {
		int smi;

		if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
			fprintf(stderr, "freeze_on_smi is not supported.\n");
			return -1;
		}

		/* Remember that we flipped the sysfs knob so it can be
		 * restored on exit (smi_reset). */
		if (!smi) {
			if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
				fprintf(stderr, "Failed to set freeze_on_smi.\n");
				return -1;
			}
			smi_reset = true;
		}

		if (pmu_have_event("msr", "aperf") &&
		    pmu_have_event("msr", "smi")) {
			if (!force_metric_only)
				stat_config.metric_only = true;
			err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
		} else {
			fprintf(stderr, "To measure SMI cost, it needs "
				"msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
			parse_events_print_error(&errinfo, smi_cost_attrs);
			return -1;
		}
		if (err) {
			parse_events_print_error(&errinfo, smi_cost_attrs);
			fprintf(stderr, "Cannot set up SMI cost events\n");
			return -1;
		}
		return 0;
	}

	if (topdown_run) {
		char *str = NULL;
		bool warn = false;

		if (!force_metric_only)
			stat_config.metric_only = true;

		/* Prefer the metric-based topdown events when available. */
		if (topdown_filter_events(topdown_metric_attrs, &str, 1) < 0) {
			pr_err("Out of memory\n");
			return -1;
		}
		if (topdown_metric_attrs[0] && str) {
			if (!stat_config.interval && !stat_config.metric_only) {
				fprintf(stat_config.output,
					"Topdown accuracy may decrease when measuring long periods.\n"
					"Please print the result regularly, e.g. -I1000\n");
			}
			goto setup_metrics;
		}

		zfree(&str);

		/* Legacy topdown events require per-core, system-wide counting. */
		if (stat_config.aggr_mode != AGGR_GLOBAL &&
		    stat_config.aggr_mode != AGGR_CORE) {
			pr_err("top down event configuration requires --per-core mode\n");
			return -1;
		}
		stat_config.aggr_mode = AGGR_CORE;
		if (nr_cgroups || !target__has_cpu(&target)) {
			pr_err("top down event configuration requires system-wide mode (-a)\n");
			return -1;
		}

		if (topdown_filter_events(topdown_attrs, &str,
				arch_topdown_check_group(&warn)) < 0) {
			pr_err("Out of memory\n");
			return -1;
		}
		if (topdown_attrs[0] && str) {
			if (warn)
				arch_topdown_group_warn();
setup_metrics:
			err = parse_events(evsel_list, str, &errinfo);
			if (err) {
				fprintf(stderr,
					"Cannot set up top down events %s: %d\n",
					str, err);
				parse_events_print_error(&errinfo, str);
				free(str);
				return -1;
			}
		} else {
			fprintf(stderr, "System does not support topdown\n");
			return -1;
		}
		free(str);
	}

	/* No events given on the command line: install the default set. */
	if (!evsel_list->core.nr_entries) {
		/* task-clock is meaningless for whole-cpu targets. */
		if (target__has_cpu(&target))
			default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;

		if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
			return -1;
		if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
			if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
				return -1;
		}
		if (pmu_have_event("cpu", "stalled-cycles-backend")) {
			if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
				return -1;
		}
		if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
			return -1;
	}

	/* Detailed events get appended to the event list: */

	if (detailed_run < 1)
		return 0;

	/* Append detailed run extra attributes: */
	if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
		return -1;

	if (detailed_run < 2)
		return 0;

	/* Append very detailed run extra attributes: */
	if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
		return -1;

	if (detailed_run < 3)
		return 0;

	/* Append very, very detailed run extra attributes: */
	return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
1854
/* Usage strings shown by "perf stat record" option parsing. */
static const char * const stat_record_usage[] = {
	"perf stat record [<options>]",
	NULL,
};
1859
Jiri Olsa3ba78bd2015-11-05 15:40:47 +01001860static void init_features(struct perf_session *session)
1861{
1862 int feat;
1863
1864 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1865 perf_header__set_feat(&session->header, feat);
1866
Jiri Olsa8002a632019-04-09 12:01:56 +02001867 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
Jiri Olsa3ba78bd2015-11-05 15:40:47 +01001868 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1869 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1870 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
1871 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
1872}
1873
/*
 * "perf stat record": create the perf_session the counts will be
 * written to, so the run can later be replayed with "perf stat report".
 * Returns the number of remaining (workload) arguments on success,
 * or a negative value on error.
 */
static int __cmd_record(int argc, const char **argv)
{
	struct perf_session *session;
	struct perf_data *data = &perf_stat.data;

	argc = parse_options(argc, argv, stat_options, stat_record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	if (output_name)
		data->path = output_name;

	/* Repeated runs (-r/--repeat) cannot be stored in one data file. */
	if (stat_config.run_count != 1 || forever) {
		pr_err("Cannot use -r option with perf stat record.\n");
		return -1;
	}

	session = perf_session__new(data, false, NULL);
	if (IS_ERR(session)) {
		pr_err("Perf session creation failed\n");
		return PTR_ERR(session);
	}

	init_features(session);

	/* Publish the session in the file-scope perf_stat state. */
	session->evlist = evsel_list;
	perf_stat.session = session;
	perf_stat.record = true;
	return argc;
}
1903
Jiri Olsa89f16882018-09-13 14:54:03 +02001904static int process_stat_round_event(struct perf_session *session,
1905 union perf_event *event)
Jiri Olsaa56f9392015-11-05 15:40:59 +01001906{
Jiri Olsa72932372019-08-28 15:57:16 +02001907 struct perf_record_stat_round *stat_round = &event->stat_round;
Jiri Olsa32dcd022019-07-21 13:23:51 +02001908 struct evsel *counter;
Jiri Olsaa56f9392015-11-05 15:40:59 +01001909 struct timespec tsh, *ts = NULL;
1910 const char **argv = session->header.env.cmdline_argv;
1911 int argc = session->header.env.nr_cmdline;
1912
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001913 evlist__for_each_entry(evsel_list, counter)
Jiri Olsaa56f9392015-11-05 15:40:59 +01001914 perf_stat_process_counter(&stat_config, counter);
1915
Andi Kleene3b03b62016-05-05 16:04:03 -07001916 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
1917 update_stats(&walltime_nsecs_stats, stat_round->time);
Jiri Olsaa56f9392015-11-05 15:40:59 +01001918
Andi Kleene3b03b62016-05-05 16:04:03 -07001919 if (stat_config.interval && stat_round->time) {
Arnaldo Carvalho de Melobd48c632016-08-05 15:40:30 -03001920 tsh.tv_sec = stat_round->time / NSEC_PER_SEC;
1921 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
Jiri Olsaa56f9392015-11-05 15:40:59 +01001922 ts = &tsh;
1923 }
1924
1925 print_counters(ts, argc, argv);
1926 return 0;
1927}
1928
Jiri Olsa62ba18ba2015-11-05 15:40:57 +01001929static
Jiri Olsa89f16882018-09-13 14:54:03 +02001930int process_stat_config_event(struct perf_session *session,
1931 union perf_event *event)
Jiri Olsa62ba18ba2015-11-05 15:40:57 +01001932{
Jiri Olsa89f16882018-09-13 14:54:03 +02001933 struct perf_tool *tool = session->tool;
Jiri Olsa68d702f2015-11-05 15:40:58 +01001934 struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1935
Jiri Olsa62ba18ba2015-11-05 15:40:57 +01001936 perf_event__read_stat_config(&stat_config, &event->stat_config);
Jiri Olsa68d702f2015-11-05 15:40:58 +01001937
Jiri Olsa315c0a12019-08-22 13:11:39 +02001938 if (perf_cpu_map__empty(st->cpus)) {
Jiri Olsa89af4e02015-11-05 15:41:02 +01001939 if (st->aggr_mode != AGGR_UNSET)
1940 pr_warning("warning: processing task data, aggregation mode not set\n");
1941 return 0;
1942 }
1943
1944 if (st->aggr_mode != AGGR_UNSET)
1945 stat_config.aggr_mode = st->aggr_mode;
1946
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001947 if (perf_stat.data.is_pipe)
Jiri Olsa68d702f2015-11-05 15:40:58 +01001948 perf_stat_init_aggr_mode();
1949 else
1950 perf_stat_init_aggr_mode_file(st);
1951
Jiri Olsa62ba18ba2015-11-05 15:40:57 +01001952 return 0;
1953}
1954
/*
 * Attach the cpu and thread maps received from the data file to the
 * evlist and allocate counter storage.  Each of the *_map_event
 * handlers calls in here; the first call sees only one of the two maps
 * and returns without doing anything.  Returns 0 on success (or when
 * still waiting for the other map), negative errno on failure.
 */
static int set_maps(struct perf_stat *st)
{
	if (!st->cpus || !st->threads)
		return 0;

	/* Both maps arriving twice would re-allocate the stats. */
	if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
		return -EINVAL;

	perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);

	if (evlist__alloc_stats(evsel_list, true))
		return -ENOMEM;

	st->maps_allocated = true;
	return 0;
}
1971
1972static
Jiri Olsa89f16882018-09-13 14:54:03 +02001973int process_thread_map_event(struct perf_session *session,
1974 union perf_event *event)
Jiri Olsa1975d362015-11-05 15:40:56 +01001975{
Jiri Olsa89f16882018-09-13 14:54:03 +02001976 struct perf_tool *tool = session->tool;
Jiri Olsa1975d362015-11-05 15:40:56 +01001977 struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1978
1979 if (st->threads) {
1980 pr_warning("Extra thread map event, ignoring.\n");
1981 return 0;
1982 }
1983
1984 st->threads = thread_map__new_event(&event->thread_map);
1985 if (!st->threads)
1986 return -ENOMEM;
1987
1988 return set_maps(st);
1989}
1990
1991static
Jiri Olsa89f16882018-09-13 14:54:03 +02001992int process_cpu_map_event(struct perf_session *session,
1993 union perf_event *event)
Jiri Olsa1975d362015-11-05 15:40:56 +01001994{
Jiri Olsa89f16882018-09-13 14:54:03 +02001995 struct perf_tool *tool = session->tool;
Jiri Olsa1975d362015-11-05 15:40:56 +01001996 struct perf_stat *st = container_of(tool, struct perf_stat, tool);
Jiri Olsaf8548392019-07-21 13:23:49 +02001997 struct perf_cpu_map *cpus;
Jiri Olsa1975d362015-11-05 15:40:56 +01001998
1999 if (st->cpus) {
2000 pr_warning("Extra cpu map event, ignoring.\n");
2001 return 0;
2002 }
2003
2004 cpus = cpu_map__new_data(&event->cpu_map.data);
2005 if (!cpus)
2006 return -ENOMEM;
2007
2008 st->cpus = cpus;
2009 return set_maps(st);
2010}
2011
/* Usage strings shown by "perf stat report" option parsing. */
static const char * const stat_report_usage[] = {
	"perf stat report [<options>]",
	NULL,
};
2016
/*
 * Tool callbacks and state for "perf stat report": dispatches the
 * attr/map/config/stat events that "perf stat record" wrote into the
 * data file.  aggr_mode starts as AGGR_UNSET so a mode given on the
 * report command line can be told apart from the recorded one.
 */
static struct perf_stat perf_stat = {
	.tool = {
		.attr		= perf_event__process_attr,
		.event_update	= perf_event__process_event_update,
		.thread_map	= process_thread_map_event,
		.cpu_map	= process_cpu_map_event,
		.stat_config	= process_stat_config_event,
		.stat		= perf_event__process_stat_event,
		.stat_round	= process_stat_round_event,
	},
	.aggr_mode	= AGGR_UNSET,
};
2029
2030static int __cmd_report(int argc, const char **argv)
2031{
2032 struct perf_session *session;
2033 const struct option options[] = {
2034 OPT_STRING('i', "input", &input_name, "file", "input file name"),
Jiri Olsa89af4e02015-11-05 15:41:02 +01002035 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
2036 "aggregate counts per processor socket", AGGR_SOCKET),
Kan Liangdb5742b2019-06-04 15:50:42 -07002037 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
2038 "aggregate counts per processor die", AGGR_DIE),
Jiri Olsa89af4e02015-11-05 15:41:02 +01002039 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
2040 "aggregate counts per physical processor core", AGGR_CORE),
Jiri Olsa86895b42019-08-28 10:17:43 +02002041 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
2042 "aggregate counts per numa node", AGGR_NODE),
Jiri Olsa89af4e02015-11-05 15:41:02 +01002043 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
2044 "disable CPU count aggregation", AGGR_NONE),
Jiri Olsaba6039b62015-11-05 15:40:55 +01002045 OPT_END()
2046 };
2047 struct stat st;
2048 int ret;
2049
Jiri Olsa8a59f3c2016-01-12 10:35:29 +01002050 argc = parse_options(argc, argv, options, stat_report_usage, 0);
Jiri Olsaba6039b62015-11-05 15:40:55 +01002051
2052 if (!input_name || !strlen(input_name)) {
2053 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
2054 input_name = "-";
2055 else
2056 input_name = "perf.data";
2057 }
2058
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002059 perf_stat.data.path = input_name;
2060 perf_stat.data.mode = PERF_DATA_MODE_READ;
Jiri Olsaba6039b62015-11-05 15:40:55 +01002061
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002062 session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05302063 if (IS_ERR(session))
2064 return PTR_ERR(session);
Jiri Olsaba6039b62015-11-05 15:40:55 +01002065
2066 perf_stat.session = session;
2067 stat_config.output = stderr;
2068 evsel_list = session->evlist;
2069
2070 ret = perf_session__process_events(session);
2071 if (ret)
2072 return ret;
2073
2074 perf_session__delete(session);
2075 return 0;
2076}
2077
Jiri Olsae3ba76d2017-02-27 10:48:18 +01002078static void setup_system_wide(int forks)
2079{
2080 /*
2081 * Make system wide (-a) the default target if
2082 * no target was specified and one of following
2083 * conditions is met:
2084 *
2085 * - there's no workload specified
2086 * - there is workload specified but all requested
2087 * events are system wide events
2088 */
2089 if (!target__none(&target))
2090 return;
2091
2092 if (!forks)
2093 target.system_wide = true;
2094 else {
Jiri Olsa32dcd022019-07-21 13:23:51 +02002095 struct evsel *counter;
Jiri Olsae3ba76d2017-02-27 10:48:18 +01002096
2097 evlist__for_each_entry(evsel_list, counter) {
Jin Yao002a3d62020-09-22 09:50:04 +08002098 if (!counter->core.system_wide &&
2099 strcmp(counter->name, "duration_time")) {
Jiri Olsae3ba76d2017-02-27 10:48:18 +01002100 return;
Jin Yao002a3d62020-09-22 09:50:04 +08002101 }
Jiri Olsae3ba76d2017-02-27 10:48:18 +01002102 }
2103
Jiri Olsa6484d2f2019-07-21 13:24:28 +02002104 if (evsel_list->core.nr_entries)
Jiri Olsae3ba76d2017-02-27 10:48:18 +01002105 target.system_wide = true;
2106 }
2107}
2108
/*
 * Entry point for 'perf stat': parse options, validate the combination of
 * requested modes, set up the output stream, the event list, CPU/thread
 * maps and aggregation, then run the workload/counting loop run_count
 * times (or forever with -r 0) and print/record the counter summary.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
int cmd_stat(int argc, const char **argv)
{
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx, err;
	const char *mode;
	FILE *output = stderr;
	unsigned int interval, timeout;
	const char * const stat_subcommands[] = { "record", "report" };
	char errbuf[BUFSIZ];

	setlocale(LC_ALL, "");

	evsel_list = evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	parse_events__shrink_config_terms();

	/* String-parsing callback-based options would segfault when negated */
	set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
	set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);

	/* Stops at the first non-option so "record"/"report" can be detected below. */
	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
					(const char **) stat_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);
	perf_stat__collect_metric_expr(evsel_list);
	perf_stat__init_shadow_stats();

	/* -x <sep> implies CSV output; "\t" is accepted as an escape for a tab. */
	if (stat_config.csv_sep) {
		stat_config.csv_output = true;
		if (!strcmp(stat_config.csv_sep, "\\t"))
			stat_config.csv_sep = "\t";
	} else
		stat_config.csv_sep = DEFAULT_SEPARATOR;

	/* Dispatch 'perf stat record' / 'perf stat report' subcommands. */
	if (argc && !strncmp(argv[0], "rec", 3)) {
		argc = __cmd_record(argc, argv);
		if (argc < 0)
			return -1;
	} else if (argc && !strncmp(argv[0], "rep", 3))
		return __cmd_report(argc, argv);

	interval = stat_config.interval;
	timeout = stat_config.timeout;

	/*
	 * For record command the -o is already taken care of.
	 */
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
		output = NULL;

	/* --output and --log-fd are mutually exclusive. */
	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		goto out;
	}

	if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
		fprintf(stderr, "--metric-only is not supported with --per-thread\n");
		goto out;
	}

	if (stat_config.metric_only && stat_config.run_count > 1) {
		fprintf(stderr, "--metric-only is not supported with -r\n");
		goto out;
	}

	/* --table only makes sense when there are multiple runs to tabulate. */
	if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
		fprintf(stderr, "--table is only supported with -r\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		parse_options_usage(NULL, stat_options, "table", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be a > 0\n");
		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
		goto out;
	}

	/*
	 * Open the requested output: a file named by -o (output == NULL at
	 * this point means -o was given), or the fd passed via --log-fd.
	 * Otherwise output stays stderr.
	 */
	if (!output && !stat_config.quiet) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		/* Timestamp header so runs in an appended log can be told apart. */
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	stat_config.output = output;

	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (stat_config.csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			stat_config.big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		stat_config.big_num = false;

	/* An invalid target is only a warning; counting may still proceed. */
	err = target__validate(&target);
	if (err) {
		target__strerror(&target, err, errbuf, BUFSIZ);
		pr_warning("%s\n", errbuf);
	}

	/* argc != 0 here means a workload command follows the options. */
	setup_system_wide(argc);

	/*
	 * Display user/system times only for single
	 * run and when there's specified tracee.
	 */
	if ((stat_config.run_count == 1) && target__none(&target))
		stat_config.ru_display = true;

	if (stat_config.run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (stat_config.run_count == 0) {
		/* -r 0: loop forever, one logical run at a time. */
		forever = true;
		stat_config.run_count = 1;
	}

	/* Per-run wall-time storage for the --table summary. */
	if (stat_config.walltime_run_table) {
		stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
		if (!stat_config.walltime_run) {
			pr_err("failed to setup -r option");
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) &&
		!target__has_task(&target)) {
		if (!target.system_wide || target.cpu_list) {
			fprintf(stderr, "The --per-thread option is only "
				"available when monitoring via -p -t -a "
				"options or only --per-thread.\n");
			parse_options_usage(NULL, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
			goto out;
		}
	}

	/*
	 * no_aggr, cgroup are for system-wide only
	 * --per-thread is aggregated per thread, we dont mix it with cpu mode
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		goto out;
	}

	/* Populate evsel_list with the default event set if -e gave none. */
	if (add_default_attributes())
		goto out;

	/* --for-each-cgroup: expand the event list once per cgroup. */
	if (stat_config.cgroup_list) {
		if (nr_cgroups > 0) {
			pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
			parse_options_usage(stat_usage, stat_options, "G", 1);
			parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
			goto out;
		}

		if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
					  &stat_config.metric_events, true) < 0) {
			parse_options_usage(stat_usage, stat_options,
					    "for-each-cgroup", 0);
			goto out;
		}
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
		target.per_thread = true;

	/* Build the CPU and thread maps the counters will be opened on. */
	if (evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads of monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	evlist__check_cpu_maps(evsel_list);

	/*
	 * Initialize thread_map with comm names,
	 * so we could print it out on output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD) {
		thread_map__read_comms(evsel_list->core.threads);
		if (target.system_wide) {
			if (runtime_stat_new(&stat_config,
				perf_thread_map__nr(evsel_list->core.threads))) {
				goto out;
			}
		}
	}

	if (stat_config.aggr_mode == AGGR_NODE)
		cpu__setup_cpunode_map();

	/* --interval-count requires --interval-print (-I). */
	if (stat_config.times && interval)
		interval_count = true;
	else if (stat_config.times && !interval) {
		pr_err("interval-count option should be used together with "
				"interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "interval-count", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	/* Very small timeouts distort results: <10ms is rejected, <100ms warned. */
	if (timeout && timeout < 100) {
		if (timeout < 10) {
			pr_err("timeout must be >= 10ms.\n");
			parse_options_usage(stat_usage, stat_options, "timeout", 0);
			goto out;
		} else
			pr_warning("timeout < 100ms. "
				   "The overhead percentage could be high in some cases. "
				   "Please proceed with caution.\n");
	}
	if (timeout && interval) {
		pr_err("timeout option is not supported with interval-print.\n");
		parse_options_usage(stat_usage, stat_options, "timeout", 0);
		parse_options_usage(stat_usage, stat_options, "I", 1);
		goto out;
	}

	if (evlist__alloc_stats(evsel_list, interval))
		goto out;

	if (perf_stat_init_aggr_mode())
		goto out;

	/*
	 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
	 * while avoiding that older tools show confusing messages.
	 *
	 * However for pipe sessions we need to keep it zero,
	 * because script's perf_evsel__check_attr is triggered
	 * by attr->sample_type != 0, and we can't run it on
	 * stat sessions.
	 */
	stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);

	/*
	 * We dont want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT, skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	/* Optional control fds (--control) for pause/resume commands. */
	if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
		goto out;

	/* Main measurement loop: run_count iterations, or endless with -r 0. */
	status = 0;
	for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
		if (stat_config.run_count != 1 && verbose > 0)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		if (run_idx != 0)
			evlist__reset_prev_raw_counts(evsel_list);

		status = run_perf_stat(argc, argv, run_idx);
		/* In forever mode each run prints and resets its own counters. */
		if (forever && status != -1 && !interval) {
			print_counters(NULL, argc, argv);
			perf_stat__reset_stats();
		}
	}

	if (!forever && status != -1 && (!interval || stat_config.summary))
		print_counters(NULL, argc, argv);

	evlist__finalize_ctlfd(evsel_list);

	if (STAT_RECORD) {
		/*
		 * We synthesize the kernel mmap record just so that older tools
		 * don't emit warnings about not being able to resolve symbols
		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
		 * a saner message about no samples being in the perf.data file.
		 *
		 * This also serves to suppress a warning about f_header.data.size == 0
		 * in header.c at the moment 'perf stat record' gets introduced, which
		 * is not really needed once we start adding the stat specific PERF_RECORD_
		 * records, but the need to suppress the kptr_restrict messages in older
		 * tools remain  -acme
		 */
		int fd = perf_data__fd(&perf_stat.data);

		err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							 process_synthesized_event,
							 &perf_stat.session->machines.host);
		if (err) {
			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
				   "older tools may produce warnings about this file\n.");
		}

		/* Interval mode writes its rounds itself; otherwise emit one final round. */
		if (!interval) {
			if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
				pr_err("failed to write stat round event\n");
		}

		if (!perf_stat.data.is_pipe) {
			perf_stat.session->header.data_size += perf_stat.bytes_written;
			perf_session__write_header(perf_stat.session, evsel_list, fd, true);
		}

		evlist__close(evsel_list);
		perf_session__delete(perf_stat.session);
	}

	perf_stat__exit_aggr_mode();
	evlist__free_stats(evsel_list);
out:
	/* Common cleanup path, reached on both success and early errors. */
	zfree(&stat_config.walltime_run);

	/* Restore the freeze-on-SMI MSR setting if --smi-cost changed it. */
	if (smi_cost && smi_reset)
		sysfs__write_int(FREEZE_ON_SMI_PATH, 0);

	evlist__delete(evsel_list);

	metricgroup__rblist_exit(&stat_config.metric_events);
	runtime_stat_delete(&stat_config);
	evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);

	return status;
}