blob: c5e10552776a93f92d7eb4f7d6901091d5f0d538 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ingo Molnarabaff322009-06-02 22:59:57 +02002/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02003 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
Ingo Molnarabaff322009-06-02 22:59:57 +02008 */
Ingo Molnar16f762a2009-05-27 09:10:38 +02009#include "builtin.h"
Ingo Molnarbf9e1872009-06-02 23:37:05 +020010
11#include "perf.h"
12
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -020013#include "util/build-id.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020014#include "util/util.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060015#include <subcmd/parse-options.h>
Ingo Molnar8ad8db32009-05-26 11:10:09 +020016#include "util/parse-events.h"
Taeung Song41840d22016-06-23 17:55:17 +090017#include "util/config.h"
Thomas Gleixner6eda5832009-05-01 18:29:57 +020018
Arnaldo Carvalho de Melo8f651ea2014-10-09 16:12:24 -030019#include "util/callchain.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030020#include "util/cgroup.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020021#include "util/header.h"
Frederic Weisbecker66e274f2009-08-12 11:07:25 +020022#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020023#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020024#include "util/evsel.h"
Frederic Weisbecker8f288272009-08-16 22:05:48 +020025#include "util/debug.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020026#include "util/session.h"
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -020027#include "util/tool.h"
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020028#include "util/symbol.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110029#include "util/cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020030#include "util/thread_map.h"
Jiri Olsaf5fc14122013-10-15 16:27:32 +020031#include "util/data.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020032#include "util/perf_regs.h"
Adrian Hunteref149c22015-04-09 18:53:45 +030033#include "util/auxtrace.h"
Adrian Hunter46bc29b2016-03-08 10:38:44 +020034#include "util/tsc.h"
Andi Kleenf00898f2015-05-27 10:51:51 -070035#include "util/parse-branch-options.h"
Stephane Eranianbcc84ec2015-08-31 18:41:12 +020036#include "util/parse-regs-options.h"
Wang Nan71dc23262015-10-14 12:41:19 +000037#include "util/llvm-utils.h"
Wang Nan8690a2a2016-02-22 09:10:32 +000038#include "util/bpf-loader.h"
Wang Nan5f9cf592016-04-20 18:59:49 +000039#include "util/trigger.h"
Wang Nana0748652016-11-26 07:03:28 +000040#include "util/perf-hooks.h"
Alexey Budankovf13de662019-01-22 20:50:57 +030041#include "util/cpu-set-sched.h"
Arnaldo Carvalho de Meloc5e40272017-04-19 16:12:39 -030042#include "util/time-utils.h"
Arnaldo Carvalho de Melo58db1d62017-04-19 16:05:56 -030043#include "util/units.h"
Song Liu7b612e22019-01-17 08:15:19 -080044#include "util/bpf-event.h"
Wang Nand8871ea2016-02-26 09:32:06 +000045#include "asm/bug.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020046
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -030047#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030048#include <inttypes.h>
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -030049#include <locale.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030050#include <poll.h>
Peter Zijlstra97124d5e2009-06-02 15:52:24 +020051#include <unistd.h>
Peter Zijlstrade9ac072009-04-08 15:01:31 +020052#include <sched.h>
Arnaldo Carvalho de Melo9607ad32017-04-19 15:49:18 -030053#include <signal.h>
Arnaldo Carvalho de Meloa41794c2010-05-18 18:29:23 -030054#include <sys/mman.h>
Arnaldo Carvalho de Melo42087352017-04-19 19:06:30 -030055#include <sys/wait.h>
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -030056#include <linux/time64.h>
Bernhard Rosenkraenzer78da39f2012-10-08 09:43:26 +030057
/*
 * State for the --switch-output feature: rotate perf.data output either
 * on SIGUSR2 (signal), after a given amount of data (size), or after a
 * time interval (time).
 */
struct switch_output {
	bool		 enabled;	/* any switch-output mode active */
	bool		 signal;	/* rotate on SIGUSR2 */
	unsigned long	 size;		/* rotate after this many bytes (0 = off) */
	unsigned long	 time;		/* rotate after this many seconds (0 = off) */
	const char	*str;		/* raw option string as given on cmdline */
	bool		 set;		/* option was present on the cmdline */
	char		 **filenames;	/* ring of generated output file names */
	int		 num_files;	/* capacity of filenames ring */
	int		 cur_file;	/* index of the file currently written */
};
69
/*
 * Top-level state of a 'perf record' session, embedding the generic
 * perf_tool callbacks (container_of() is used to get back to it from
 * tool callbacks).
 */
struct record {
	struct perf_tool	tool;		/* event-processing callbacks */
	struct record_opts	opts;		/* parsed command-line options */
	u64			bytes_written;	/* payload written so far (drives size-based rotation) */
	struct perf_data	data;		/* output perf.data descriptor */
	struct auxtrace_record	*itr;		/* AUX area (e.g. Intel PT) recording state, or NULL */
	struct perf_evlist	*evlist;	/* events being recorded */
	struct perf_session	*session;
	int			realtime_prio;	/* SCHED_FIFO priority, 0 = don't change */
	bool			no_buildid;
	bool			no_buildid_set;	/* no_buildid came from cmdline, not config */
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;	/* mark all DSOs regardless of sample hits */
	bool			timestamp_filename;	/* append timestamp to output name */
	bool			timestamp_boundary;	/* record first/last sample times */
	struct switch_output	switch_output;	/* output rotation config */
	unsigned long long	samples;	/* samples seen (for the final summary) */
	cpu_set_t		affinity_mask;	/* last mask set by record__adjust_affinity() */
};
Ingo Molnara21ca2c2009-06-06 09:58:57 +020090
/* set from the SIGUSR2 path while an auxtrace snapshot is in flight */
static volatile int auxtrace_record__snapshot_started;
/* trigger fired to request an AUX area snapshot (see util/trigger.h) */
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
/* trigger fired to request an output-file switch */
static DEFINE_TRIGGER(switch_output_trigger);

/* printable names indexed by enum perf_affinity (PERF_AFFINITY_*) */
static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};
98
Jiri Olsadc0c6122017-01-09 10:51:58 +010099static bool switch_output_signal(struct record *rec)
100{
101 return rec->switch_output.signal &&
102 trigger_is_ready(&switch_output_trigger);
103}
104
105static bool switch_output_size(struct record *rec)
106{
107 return rec->switch_output.size &&
108 trigger_is_ready(&switch_output_trigger) &&
109 (rec->bytes_written >= rec->switch_output.size);
110}
111
Jiri Olsabfacbe32017-01-09 10:52:00 +0100112static bool switch_output_time(struct record *rec)
113{
114 return rec->switch_output.time &&
115 trigger_is_ready(&switch_output_trigger);
116}
117
/*
 * Synchronously append @size bytes at @bf to the output file, account
 * them in rec->bytes_written, and fire the switch-output trigger when
 * the size threshold is crossed.  Returns 0 or -1 on write failure.
 * @map is unused here; it exists so the signature matches the mmap
 * push-callback shape used by the AIO path.
 */
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	/* size-based output rotation is checked on every write */
	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}
135
#ifdef HAVE_AIO_SUPPORT
/*
 * Queue one POSIX AIO write of @size bytes from @buf at file offset
 * @off on @trace_fd.  Retries while aio_write() fails with EAGAIN
 * (queue full); on any other error marks the cblock free by setting
 * aio_fildes = -1.  Returns aio_write()'s result: 0 on successful
 * enqueue, -1 on error.  No completion notification is requested
 * (SIGEV_NONE) — completion is polled via record__aio_complete().
 */
static int record__aio_write(struct aiocb *cblock, int trace_fd,
		void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}
161
/*
 * Poll one in-flight AIO request.  Returns 0 while the request is still
 * in progress (or was restarted for a short write), 1 when the cblock
 * is fully complete and free for reuse.  On full completion the mmap
 * reference taken when the write was queued is dropped.
 */
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		/* EINTR is benign; anything else is a real write failure */
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		/* mark the control block free for the next allocation */
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in perf_mmap__push() for
		 * every enqueued aio write request so decrement it because
		 * the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}
207
/*
 * Reap completed AIO requests on @md.  With sync_all == false, return
 * the index of the first free control block (blocking in aio_suspend()
 * until one completes if necessary).  With sync_all == true, wait until
 * every request has completed, then return -1.
 */
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			/* aio_fildes == -1 marks a free control block */
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		/* wait up to 1ms for any outstanding request, then re-scan */
		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
242
/*
 * perf_mmap__aio_push() callback: queue an async write of @size bytes
 * at file offset @off.  On successful enqueue the bytes are accounted
 * immediately (before completion) so size-based output switching keeps
 * working in AIO mode.  Returns record__aio_write()'s result.
 */
static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
{
	struct record *rec = to;
	int ret, trace_fd = rec->session->data->file.fd;

	rec->samples++;

	ret = record__aio_write(cblock, trace_fd, bf, size, off);
	if (!ret) {
		rec->bytes_written += size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	}

	return ret;
}
259
/*
 * Return the current file position of @trace_fd (the offset at which
 * the next AIO write will be queued); (off_t)-1 on lseek failure.
 */
static off_t record__aio_get_pos(int trace_fd)
{
	off_t pos = lseek(trace_fd, 0, SEEK_CUR);

	return pos;
}
264
/*
 * Restore the file position of @trace_fd to @pos after queued AIO
 * writes have been accounted; lseek errors are intentionally ignored
 * (best effort, matching the get side).
 */
static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}
269
/*
 * Drain all outstanding AIO writes on every mapped ring buffer.  Called
 * before rotating/closing the output so no request is left in flight.
 * No-op when AIO mode is disabled (nr_cblocks == 0).
 */
static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_mmap *maps = evlist->mmap;

	if (!rec->opts.nr_cblocks)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base)
			record__aio_sync(map, true);
	}
}
286
/* default / maximum number of AIO control blocks per mmap (--aio[=n]) */
static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

/*
 * Option callback for --aio[=n]: parse the control-block count into
 * opts->nr_cblocks.  Bare --aio (or an unparsable value, since strtol()
 * then yields 0) selects the default; --no-aio sets 0 (disabled).
 * Always returns 0; clamping to nr_cblocks_max happens later.
 */
static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */
/*
 * Stubs used when libc lacks POSIX AIO.  nr_cblocks stays 0, so
 * record__aio_enabled() is always false and none of these is reached
 * on a hot path; they only keep the callers compiling.
 */
static int nr_cblocks_max = 0;

static int record__aio_sync(struct perf_mmap *md __maybe_unused, bool sync_all __maybe_unused)
{
	return -1;
}

static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
		void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif
334
335static int record__aio_enabled(struct record *rec)
336{
337 return rec->opts.nr_cblocks > 0;
338}
339
#define MMAP_FLUSH_DEFAULT 1
/*
 * Option callback for --mmap-flush: parse a byte threshold (with
 * optional B/K/M/G suffix, falling back to a plain number) below which
 * ring-buffer data is not flushed to the output.  The value is clamped
 * to a quarter of the mmap size; 0/unparsable becomes the default (1,
 * i.e. flush everything).  Always returns 0.
 */
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag  = 'B', .mult = 1       },
			{ .tag  = 'K', .mult = 1 << 10 },
			{ .tag  = 'M', .mult = 1 << 20 },
			{ .tag  = 'G', .mult = 1 << 30 },
			{ .tag  = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		/* parse_tag_value() returns (unsigned)-1 when no suffix matched */
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	/* never require more than 1/4 of the ring buffer before flushing */
	flush_max = perf_evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
374
/*
 * perf_tool callback for events synthesized in userspace (mmap, comm,
 * ...): write them straight to the output file.
 */
static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}
383
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200384static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
Arnaldo Carvalho de Melod37f1582017-10-05 16:39:55 -0300385{
386 struct record *rec = to;
387
388 rec->samples++;
Jiri Olsaded2b8f2018-09-13 14:54:06 +0200389 return record__write(rec, map, bf, size);
Arnaldo Carvalho de Melod37f1582017-10-05 16:39:55 -0300390}
391
/* main-loop exit flag, set from signal context */
static volatile int done;
/* signal to re-raise at exit, -1 when none is pending */
static volatile int signr = -1;
/* set by SIGCHLD when the forked workload has exited */
static volatile int child_finished;
Wang Nanc0bdc1c2016-04-13 08:21:06 +0000395
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300396static void sig_handler(int sig)
397{
398 if (sig == SIGCHLD)
399 child_finished = 1;
400 else
401 signr = sig;
402
403 done = 1;
404}
405
/*
 * SIGSEGV handler: give perf-hooks a chance to recover/cleanup, then
 * dump a stack trace for the crash report.
 */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
411
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300412static void record__sig_exit(void)
413{
414 if (signr == -1)
415 return;
416
417 signal(signr, SIG_DFL);
418 raise(signr);
419}
420
#ifdef HAVE_AUXTRACE_SUPPORT

/*
 * Write one AUX area trace event plus its data payload(s) to the
 * output.  For seekable single-file output the current file offset is
 * first recorded in the session's auxtrace index.  data1/data2 are the
 * two halves of a ring-buffer wrap; the payload is padded to an 8-byte
 * boundary.  Returns 0 or a negative error.
 */
static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	/* pipes can't be indexed by offset; directory output is indexed elsewhere */
	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}
460
/*
 * Drain @map's AUX area buffer through record__process_auxtrace().
 * A positive auxtrace_mmap__read() return means data was emitted, which
 * counts as a sample for the end-of-run summary.  Returns 0 or a
 * negative error.
 */
static int record__auxtrace_mmap_read(struct record *rec,
				      struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}
476
/*
 * Snapshot-mode variant of record__auxtrace_mmap_read(): grab at most
 * auxtrace_snapshot_size bytes from @map's AUX buffer.  Returns 0 or a
 * negative error.
 */
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}
493
494static int record__auxtrace_read_snapshot_all(struct record *rec)
495{
496 int i;
497 int rc = 0;
498
499 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
Jiri Olsae035f4c2018-09-13 14:54:05 +0200500 struct perf_mmap *map = &rec->evlist->mmap[i];
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300501
Jiri Olsae035f4c2018-09-13 14:54:05 +0200502 if (!map->auxtrace_mmap.base)
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300503 continue;
504
Jiri Olsae035f4c2018-09-13 14:54:05 +0200505 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +0300506 rc = -1;
507 goto out;
508 }
509 }
510out:
511 return rc;
512}
513
/*
 * Complete a requested AUX snapshot: read all buffers, then finish the
 * snapshot in the PMU driver.  On any failure the snapshot trigger is
 * put into the error state; on success it is re-armed for the next
 * request.
 */
static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}
526
/*
 * One-time AUX area setup: create the itr (auxtrace record) backend if
 * not already present, apply snapshot options, and parse per-event
 * auxtrace filters.  Returns 0 or a negative error.
 */
static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}
544
#else

/*
 * Stubs used when perf is built without AUX area tracing support; all
 * succeed as no-ops so the main record path needs no #ifdefs.
 */
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif
571
/*
 * mmap the event ring buffers for @evlist according to the record
 * options (mmap size, auxtrace, AIO block count, affinity, flush
 * threshold).  EPERM gets a dedicated hint about perf_event_mlock_kb.
 * Returns 0, -errno, or -EINVAL.
 */
static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	/* NUMA-aware affinity modes need the cpu -> node map up front */
	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}
605
/* Convenience wrapper: mmap the session's own evlist. */
static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}
610
/*
 * Open all events, apply filters and mmap the ring buffers.  Handles
 * two retry paths: falling back to a softer event config when open
 * fails (perf_evsel__fallback), and degrading weak groups on
 * EINVAL/EBADF.  With --delay, a dummy tracking event is added first
 * so MMAP records are captured while the real events stay disabled
 * until enable_on_exec fires.  Returns 0 or a negative error.
 */
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		/* dummy event (first) tracks; the real events wait for exec */
		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			/* try a degraded config (e.g. no precise_ip) first */
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			/* weak group: break the group up and retry its members */
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
679
/*
 * Sample callback used while post-processing the recorded file for
 * build-ids: track the first/last sample timestamps and mark the DSOs
 * each sample hit.  With --buildid-all the per-sample DSO marking is
 * skipped (all DSOs are dumped anyway).
 */
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}
699
/*
 * Re-read the just-recorded data file to collect build-ids of the DSOs
 * that were hit (or of all DSOs with --buildid-all).  Returns 0 when
 * the file is empty, otherwise perf_session__process_events()'s result.
 */
static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples. But if timestamp_boundary is enabled,
	 * it still needs to walk on all samples to get the timestamps of
	 * first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}
729
/*
 * machines__process_guests() callback: synthesize module and kernel
 * mmap events for one guest machine so guest-side samples can be
 * resolved.  @data is the perf_tool; failures are logged but not fatal.
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX.  This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
758
/*
 * Header-only PERF_RECORD_FINISHED_ROUND marker, written after each
 * pass over the ring buffers so report-side reordering can flush.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
763
/*
 * In NODE/CPU affinity modes, migrate this thread to the CPU set
 * associated with @map before reading it, so ring-buffer pages are
 * touched from a nearby CPU.  Cached in rec->affinity_mask to avoid
 * redundant sched_setaffinity() calls.
 */
static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
		CPU_ZERO(&rec->affinity_mask);
		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
	}
}
773
/*
 * Drain the ring buffers of @evlist into the output file.
 *
 * @overwrite: operate on the overwritable mmaps (evlist->overwrite_mmap)
 *             instead of the regular ones; only done when their state is
 *             BKW_MMAP_DATA_PENDING, and they are toggled to
 *             BKW_MMAP_EMPTY afterwards.
 * @synch:     temporarily force map->flush to 1 so that even partially
 *             filled buffers are pushed out (the original flush value is
 *             restored on every exit path).
 *
 * If at least one event was written, a PERF_RECORD_FINISHED_ROUND marker
 * is appended.  Returns 0 on success, -1 on error.
 */
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off;	/* only valid/used when record__aio_enabled() */

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	/* AIO writes go to explicit offsets; fetch the current position once. */
	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				/* Save and override the flush threshold. */
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) != 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				int idx;
				/*
				 * Call record__aio_sync() to wait till map->data buffer
				 * becomes available after previous aio write request.
				 */
				idx = record__aio_sync(map, false);
				if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
					/* Persist the advanced offset even on failure. */
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}
855
Alexey Budankov470530b2019-03-18 20:40:26 +0300856static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +0000857{
858 int err;
859
Alexey Budankov470530b2019-03-18 20:40:26 +0300860 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +0000861 if (err)
862 return err;
863
Alexey Budankov470530b2019-03-18 20:40:26 +0300864 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +0000865}
866
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -0300867static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -0700868{
David Ahern57706ab2013-11-06 11:41:34 -0700869 struct perf_session *session = rec->session;
870 int feat;
871
872 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
873 perf_header__set_feat(&session->header, feat);
874
875 if (rec->no_buildid)
876 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
877
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -0300878 if (!have_tracepoints(&rec->evlist->entries))
David Ahern57706ab2013-11-06 11:41:34 -0700879 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
880
881 if (!rec->opts.branch_stack)
882 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +0300883
884 if (!rec->opts.full_auxtrace)
885 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +0100886
Alexey Budankovcf790512018-10-09 17:36:24 +0300887 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
888 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
889
Jiri Olsa258031c2019-03-08 14:47:39 +0100890 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
891
Jiri Olsaffa517a2015-10-25 15:51:43 +0100892 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -0700893}
894
Wang Nane1ab48b2016-02-26 09:32:10 +0000895static void
896record__finish_output(struct record *rec)
897{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100898 struct perf_data *data = &rec->data;
899 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +0000900
Jiri Olsa8ceb41d2017-01-23 22:07:59 +0100901 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +0000902 return;
903
904 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +0100905 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +0000906
907 if (!rec->no_buildid) {
908 process_buildids(rec);
909
910 if (rec->buildid_all)
911 dsos__hit_all(rec->session);
912 }
913 perf_session__write_header(rec->session, rec->evlist, fd, true);
914
915 return;
916}
917
Wang Nan4ea648a2016-07-14 08:34:47 +0000918static int record__synthesize_workload(struct record *rec, bool tail)
Wang Nanbe7b0c92016-04-20 18:59:54 +0000919{
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300920 int err;
921 struct thread_map *thread_map;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000922
Wang Nan4ea648a2016-07-14 08:34:47 +0000923 if (rec->opts.tail_synthesize != tail)
924 return 0;
925
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300926 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
927 if (thread_map == NULL)
928 return -1;
929
930 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
Wang Nanbe7b0c92016-04-20 18:59:54 +0000931 process_synthesized_event,
932 &rec->session->machines.host,
Mark Drayton3fcb10e2018-12-04 12:34:20 -0800933 rec->opts.sample_address);
Arnaldo Carvalho de Melo9d6aae72017-02-14 10:59:04 -0300934 thread_map__put(thread_map);
935 return err;
Wang Nanbe7b0c92016-04-20 18:59:54 +0000936}
937
Wang Nan4ea648a2016-07-14 08:34:47 +0000938static int record__synthesize(struct record *rec, bool tail);
Wang Nan3c1cb7e2016-04-20 18:59:50 +0000939
/*
 * Rotate the output: drain the ring buffers, finalize the current
 * perf.data, and switch writing to a new timestamp-suffixed file.
 *
 * @at_exit: this is the final rotation at tool exit; byte counters are
 *           then left alone and no new tracking events are synthesized.
 *
 * Returns the new output fd (>= 0) on success, a negative error code
 * otherwise.  Ownership of the generated filename passes either into
 * rec->switch_output.filenames[] (when a file ring is kept) or is freed
 * here.
 */
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	/* Wait for any in-flight AIO writes before closing out the file. */
	record__aio_mmap_read_sync(rec);

	/* Tail-synthesis pass for the file being closed. */
	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
			       rec->session->header.data_offset,
			       at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		/* Fresh file: restart the byte accounting. */
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	/*
	 * --switch-max-files: keep only num_files rotated outputs, removing
	 * the oldest one before storing the new filename in its slot.
	 */
	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			free(rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. Which causes newly created perf.data doesn't
		 * contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
1009
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001010static volatile int workload_exec_errno;
1011
1012/*
1013 * perf_evlist__prepare_workload will send a SIGUSR1
1014 * if the fork fails, since we asked by setting its
1015 * want_signal to true.
1016 */
/*
 * SIGUSR1 handler installed for the workload-fork failure case (see the
 * comment above workload_exec_errno): stash the errno shipped in the
 * siginfo payload and flag the main loop to terminate.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}
1025
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001026static void snapshot_sig_handler(int sig);
Jiri Olsabfacbe32017-01-09 10:52:00 +01001027static void alarm_sig_handler(int sig);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001028
/*
 * Weak default: architectures that can synthesize time-conversion
 * information override this; everywhere else it is a successful no-op.
 */
int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}
1037
Wang Nanee667f92016-06-27 10:24:05 +00001038static const struct perf_event_mmap_page *
1039perf_evlist__pick_pc(struct perf_evlist *evlist)
1040{
Wang Nanb2cb6152016-07-14 08:34:39 +00001041 if (evlist) {
1042 if (evlist->mmap && evlist->mmap[0].base)
1043 return evlist->mmap[0].base;
Wang Nan0b72d692017-12-04 16:51:07 +00001044 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
1045 return evlist->overwrite_mmap[0].base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001046 }
Wang Nanee667f92016-06-27 10:24:05 +00001047 return NULL;
1048}
1049
Wang Nanc45628b2016-05-24 02:28:59 +00001050static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1051{
Wang Nanee667f92016-06-27 10:24:05 +00001052 const struct perf_event_mmap_page *pc;
1053
1054 pc = perf_evlist__pick_pc(rec->evlist);
1055 if (pc)
1056 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001057 return NULL;
1058}
1059
Wang Nan4ea648a2016-07-14 08:34:47 +00001060static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +00001061{
1062 struct perf_session *session = rec->session;
1063 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001064 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +00001065 struct record_opts *opts = &rec->opts;
1066 struct perf_tool *tool = &rec->tool;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001067 int fd = perf_data__fd(data);
Wang Nanc45c86e2016-02-26 09:32:07 +00001068 int err = 0;
1069
Wang Nan4ea648a2016-07-14 08:34:47 +00001070 if (rec->opts.tail_synthesize != tail)
1071 return 0;
1072
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001073 if (data->is_pipe) {
Jiri Olsaa2015512018-03-14 10:22:04 +01001074 /*
1075 * We need to synthesize events first, because some
1076 * features works on top of them (on report side).
1077 */
Jiri Olsa318ec182018-08-30 08:32:15 +02001078 err = perf_event__synthesize_attrs(tool, rec->evlist,
Wang Nanc45c86e2016-02-26 09:32:07 +00001079 process_synthesized_event);
1080 if (err < 0) {
1081 pr_err("Couldn't synthesize attrs.\n");
1082 goto out;
1083 }
1084
Jiri Olsaa2015512018-03-14 10:22:04 +01001085 err = perf_event__synthesize_features(tool, session, rec->evlist,
1086 process_synthesized_event);
1087 if (err < 0) {
1088 pr_err("Couldn't synthesize features.\n");
1089 return err;
1090 }
1091
Wang Nanc45c86e2016-02-26 09:32:07 +00001092 if (have_tracepoints(&rec->evlist->entries)) {
1093 /*
1094 * FIXME err <= 0 here actually means that
1095 * there were no tracepoints so its not really
1096 * an error, just that we don't need to
1097 * synthesize anything. We really have to
1098 * return this more properly and also
1099 * propagate errors that now are calling die()
1100 */
1101 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
1102 process_synthesized_event);
1103 if (err <= 0) {
1104 pr_err("Couldn't record tracing data.\n");
1105 goto out;
1106 }
1107 rec->bytes_written += err;
1108 }
1109 }
1110
Wang Nanc45628b2016-05-24 02:28:59 +00001111 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001112 process_synthesized_event, machine);
1113 if (err)
1114 goto out;
1115
Wang Nanc45c86e2016-02-26 09:32:07 +00001116 if (rec->opts.full_auxtrace) {
1117 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1118 session, process_synthesized_event);
1119 if (err)
1120 goto out;
1121 }
1122
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001123 if (!perf_evlist__exclude_kernel(rec->evlist)) {
1124 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1125 machine);
1126 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1127 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1128 "Check /proc/kallsyms permission or run as root.\n");
Wang Nanc45c86e2016-02-26 09:32:07 +00001129
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001130 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1131 machine);
1132 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1133 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1134 "Check /proc/modules permission or run as root.\n");
1135 }
Wang Nanc45c86e2016-02-26 09:32:07 +00001136
1137 if (perf_guest) {
1138 machines__process_guests(&session->machines,
1139 perf_event__synthesize_guest_os, tool);
1140 }
1141
Andi Kleenbfd8f722017-11-17 13:42:58 -08001142 err = perf_event__synthesize_extra_attr(&rec->tool,
1143 rec->evlist,
1144 process_synthesized_event,
1145 data->is_pipe);
1146 if (err)
1147 goto out;
1148
Andi Kleen373565d2017-11-17 13:42:59 -08001149 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
1150 process_synthesized_event,
1151 NULL);
1152 if (err < 0) {
1153 pr_err("Couldn't synthesize thread map.\n");
1154 return err;
1155 }
1156
1157 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
1158 process_synthesized_event, NULL);
1159 if (err < 0) {
1160 pr_err("Couldn't synthesize cpu map.\n");
1161 return err;
1162 }
1163
Song Liue5416952019-03-11 22:30:41 -07001164 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
Song Liu7b612e22019-01-17 08:15:19 -08001165 machine, opts);
1166 if (err < 0)
1167 pr_warning("Couldn't synthesize bpf events.\n");
1168
Wang Nanc45c86e2016-02-26 09:32:07 +00001169 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
1170 process_synthesized_event, opts->sample_address,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001171 1);
Wang Nanc45c86e2016-02-26 09:32:07 +00001172out:
1173 return err;
1174}
1175
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001176static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001177{
David Ahern57706ab2013-11-06 11:41:34 -07001178 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001179 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001180 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001181 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001182 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001183 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001184 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001185 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001186 bool disabled = false, draining = false;
Song Liu657ee552019-03-11 22:30:50 -07001187 struct perf_evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001188 int fd;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001189
Namhyung Kim45604712014-05-12 09:47:24 +09001190 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001191 signal(SIGCHLD, sig_handler);
1192 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001193 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001194 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001195
Hari Bathinif3b36142017-03-08 02:11:43 +05301196 if (rec->opts.record_namespaces)
1197 tool->namespace_events = true;
1198
Jiri Olsadc0c6122017-01-09 10:51:58 +01001199 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001200 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001201 if (rec->opts.auxtrace_snapshot_mode)
1202 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001203 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001204 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001205 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001206 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001207 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001208
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001209 session = perf_session__new(data, false, tool);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -02001210 if (session == NULL) {
Adrien BAKffa91882014-04-18 11:00:43 +09001211 pr_err("Perf session creation failed.\n");
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001212 return -1;
1213 }
1214
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001215 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001216 rec->session = session;
1217
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001218 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001219
Alexey Budankovcf790512018-10-09 17:36:24 +03001220 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1221 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1222
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001223 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001224 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001225 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001226 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001227 if (err < 0) {
1228 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001229 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001230 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001231 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001232 }
1233
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001234 /*
1235 * If we have just single event and are sending data
1236 * through pipe, we need to force the ids allocation,
1237 * because we synthesize event name through the pipe
1238 * and need the id for that.
1239 */
1240 if (data->is_pipe && rec->evlist->nr_entries == 1)
1241 rec->opts.sample_id = true;
1242
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001243 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001244 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001245 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001246 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001247
Wang Nan8690a2a2016-02-22 09:10:32 +00001248 err = bpf__apply_obj_config();
1249 if (err) {
1250 char errbuf[BUFSIZ];
1251
1252 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1253 pr_err("ERROR: Apply config to BPF failed: %s\n",
1254 errbuf);
1255 goto out_child;
1256 }
1257
Adrian Huntercca84822015-08-19 17:29:21 +03001258 /*
1259 * Normally perf_session__new would do this, but it doesn't have the
1260 * evlist.
1261 */
1262 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1263 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1264 rec->tool.ordered_events = false;
1265 }
1266
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001267 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001268 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1269
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001270 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001271 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001272 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001273 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001274 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001275 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001276 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001277 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001278 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001279
David Ahernd3665492012-02-06 15:27:52 -07001280 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001281 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001282 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001283 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001284 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001285 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001286 }
1287
Song Liud56354d2019-03-11 22:30:51 -07001288 if (!opts->no_bpf_event)
1289 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1290
Song Liu657ee552019-03-11 22:30:50 -07001291 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1292 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1293 opts->no_bpf_event = true;
1294 }
1295
Wang Nan4ea648a2016-07-14 08:34:47 +00001296 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001297 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001298 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001299
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001300 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001301 struct sched_param param;
1302
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001303 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001304 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001305 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001306 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001307 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001308 }
1309 }
1310
Jiri Olsa774cb492012-11-12 18:34:01 +01001311 /*
1312 * When perf is starting the traced process, all the events
1313 * (apart from group members) have enable_on_exec=1 set,
1314 * so don't spoil it by prematurely enabling them.
1315 */
Andi Kleen6619a532014-01-11 13:38:27 -08001316 if (!target__none(&opts->target) && !opts->initial_delay)
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001317 perf_evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001318
Peter Zijlstra856e9662009-12-16 17:55:55 +01001319 /*
1320 * Let the child rip
1321 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001322 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001323 struct machine *machine = &session->machines.host;
Namhyung Kime5bed5642015-09-30 10:45:24 +09001324 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301325 pid_t tgid;
Namhyung Kime5bed5642015-09-30 10:45:24 +09001326
1327 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1328 if (event == NULL) {
1329 err = -ENOMEM;
1330 goto out_child;
1331 }
1332
Namhyung Kime803cf92015-09-22 09:24:55 +09001333 /*
1334 * Some H/W events are generated before COMM event
1335 * which is emitted during exec(), so perf script
1336 * cannot see a correct process name for those events.
1337 * Synthesize COMM event to prevent it.
1338 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301339 tgid = perf_event__synthesize_comm(tool, event,
1340 rec->evlist->workload.pid,
1341 process_synthesized_event,
1342 machine);
1343 free(event);
1344
1345 if (tgid == -1)
1346 goto out_child;
1347
1348 event = malloc(sizeof(event->namespaces) +
1349 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1350 machine->id_hdr_size);
1351 if (event == NULL) {
1352 err = -ENOMEM;
1353 goto out_child;
1354 }
1355
1356 /*
1357 * Synthesize NAMESPACES event for the command specified.
1358 */
1359 perf_event__synthesize_namespaces(tool, event,
1360 rec->evlist->workload.pid,
1361 tgid, process_synthesized_event,
1362 machine);
Namhyung Kime5bed5642015-09-30 10:45:24 +09001363 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001364
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001365 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001366 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001367
Andi Kleen6619a532014-01-11 13:38:27 -08001368 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001369 usleep(opts->initial_delay * USEC_PER_MSEC);
Andi Kleen6619a532014-01-11 13:38:27 -08001370 perf_evlist__enable(rec->evlist);
1371 }
1372
Wang Nan5f9cf592016-04-20 18:59:49 +00001373 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001374 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001375 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001376 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001377 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001378
Wang Nan057374642016-07-14 08:34:43 +00001379 /*
1380 * rec->evlist->bkw_mmap_state is possible to be
1381 * BKW_MMAP_EMPTY here: when done == true and
1382 * hits != rec->samples in previous round.
1383 *
1384 * perf_evlist__toggle_bkw_mmap ensure we never
1385 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1386 */
1387 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1388 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1389
Alexey Budankov470530b2019-03-18 20:40:26 +03001390 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001391 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001392 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001393 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001394 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001395 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001396
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001397 if (auxtrace_record__snapshot_started) {
1398 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001399 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001400 record__read_auxtrace_snapshot(rec);
Wang Nan5f9cf592016-04-20 18:59:49 +00001401 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001402 pr_err("AUX area tracing snapshot failed\n");
1403 err = -1;
1404 goto out_child;
1405 }
1406 }
1407
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001408 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001409 /*
1410 * If switch_output_trigger is hit, the data in
1411 * overwritable ring buffer should have been collected,
1412 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1413 *
1414 * If SIGUSR2 raise after or during record__mmap_read_all(),
1415 * record__mmap_read_all() didn't collect data from
1416 * overwritable ring buffer. Read again.
1417 */
1418 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1419 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001420 trigger_ready(&switch_output_trigger);
1421
Wang Nan057374642016-07-14 08:34:43 +00001422 /*
1423 * Reenable events in overwrite ring buffer after
1424 * record__mmap_read_all(): we should have collected
1425 * data from it.
1426 */
1427 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1428
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001429 if (!quiet)
1430 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1431 waking);
1432 waking = 0;
1433 fd = record__switch_output(rec, false);
1434 if (fd < 0) {
1435 pr_err("Failed to switch to new file\n");
1436 trigger_error(&switch_output_trigger);
1437 err = fd;
1438 goto out_child;
1439 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001440
1441 /* re-arm the alarm */
1442 if (rec->switch_output.time)
1443 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001444 }
1445
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001446 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001447 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001448 break;
Arnaldo Carvalho de Melof66a889d2014-08-18 17:25:59 -03001449 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001450 /*
1451 * Propagate error, only if there's any. Ignore positive
1452 * number of returned events and interrupt error.
1453 */
1454 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001455 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001456 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001457
1458 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1459 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001460 }
1461
Jiri Olsa774cb492012-11-12 18:34:01 +01001462 /*
1463 * When perf is starting the traced process, at the end events
1464 * die with the process and we wait for that. Thus no need to
1465 * disable events in this case.
1466 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001467 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001468 trigger_off(&auxtrace_snapshot_trigger);
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001469 perf_evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001470 disabled = true;
1471 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001472 }
Wang Nan5f9cf592016-04-20 18:59:49 +00001473 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001474 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001475
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001476 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001477 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001478 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001479 pr_err("Workload failed: %s\n", emsg);
1480 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001481 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001482 }
1483
Namhyung Kime3d59112015-01-29 17:06:44 +09001484 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001485 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001486
Wang Nan4ea648a2016-07-14 08:34:47 +00001487 if (target__none(&rec->opts.target))
1488 record__synthesize_workload(rec, true);
1489
Namhyung Kim45604712014-05-12 09:47:24 +09001490out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001491 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001492 record__aio_mmap_read_sync(rec);
1493
Namhyung Kim45604712014-05-12 09:47:24 +09001494 if (forks) {
1495 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001496
Namhyung Kim45604712014-05-12 09:47:24 +09001497 if (!child_finished)
1498 kill(rec->evlist->workload.pid, SIGTERM);
1499
1500 wait(&exit_status);
1501
1502 if (err < 0)
1503 status = err;
1504 else if (WIFEXITED(exit_status))
1505 status = WEXITSTATUS(exit_status);
1506 else if (WIFSIGNALED(exit_status))
1507 signr = WTERMSIG(exit_status);
1508 } else
1509 status = err;
1510
Wang Nan4ea648a2016-07-14 08:34:47 +00001511 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001512 /* this will be recalculated during process_buildids() */
1513 rec->samples = 0;
1514
Wang Nanecfd7a92016-04-13 08:21:07 +00001515 if (!err) {
1516 if (!rec->timestamp_filename) {
1517 record__finish_output(rec);
1518 } else {
1519 fd = record__switch_output(rec, true);
1520 if (fd < 0) {
1521 status = fd;
1522 goto out_delete_session;
1523 }
1524 }
1525 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001526
Wang Nana0748652016-11-26 07:03:28 +00001527 perf_hooks__invoke_record_end();
1528
Namhyung Kime3d59112015-01-29 17:06:44 +09001529 if (!err && !quiet) {
1530 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001531 const char *postfix = rec->timestamp_filename ?
1532 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001533
Adrian Hunteref149c22015-04-09 18:53:45 +03001534 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001535 scnprintf(samples, sizeof(samples),
1536 " (%" PRIu64 " samples)", rec->samples);
1537 else
1538 samples[0] = '\0';
1539
Wang Nanecfd7a92016-04-13 08:21:07 +00001540 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001541 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001542 data->path, postfix, samples);
Namhyung Kime3d59112015-01-29 17:06:44 +09001543 }
1544
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001545out_delete_session:
1546 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001547
1548 if (!opts->no_bpf_event)
1549 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001550 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001551}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001552
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001553static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001554{
Kan Liangaad2b212015-01-05 13:23:04 -05001555 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001556
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001557 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001558
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001559 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001560 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001561 callchain->dump_size);
1562}
1563
1564int record_opts__parse_callchain(struct record_opts *record,
1565 struct callchain_param *callchain,
1566 const char *arg, bool unset)
1567{
1568 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001569 callchain->enabled = !unset;
1570
1571 /* --no-call-graph */
1572 if (unset) {
1573 callchain->record_mode = CALLCHAIN_NONE;
1574 pr_debug("callchain: disabled\n");
1575 return 0;
1576 }
1577
1578 ret = parse_callchain_record_opt(arg, callchain);
1579 if (!ret) {
1580 /* Enable data address sampling for DWARF unwind. */
1581 if (callchain->record_mode == CALLCHAIN_DWARF)
1582 record->sample_address = true;
1583 callchain_debug(callchain);
1584 }
1585
1586 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001587}
1588
/*
 * Option callback for --call-graph: parse the argument into the global
 * callchain_param, with opt->value as the struct record_opts to update.
 */
int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}
1595
Kan Liangc421e802015-07-29 05:42:12 -04001596int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001597 const char *arg __maybe_unused,
1598 int unset __maybe_unused)
1599{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001600 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001601
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001602 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001603
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001604 if (callchain->record_mode == CALLCHAIN_NONE)
1605 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001606
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001607 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001608 return 0;
1609}
1610
Jiri Olsaeb853e82014-02-03 12:44:42 +01001611static int perf_record_config(const char *var, const char *value, void *cb)
1612{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001613 struct record *rec = cb;
1614
1615 if (!strcmp(var, "record.build-id")) {
1616 if (!strcmp(value, "cache"))
1617 rec->no_buildid_cache = false;
1618 else if (!strcmp(value, "no-cache"))
1619 rec->no_buildid_cache = true;
1620 else if (!strcmp(value, "skip"))
1621 rec->no_buildid = true;
1622 else
1623 return -1;
1624 return 0;
1625 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001626 if (!strcmp(var, "record.call-graph")) {
1627 var = "call-graph.record-mode";
1628 return perf_default_config(var, value, cb);
1629 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001630#ifdef HAVE_AIO_SUPPORT
1631 if (!strcmp(var, "record.aio")) {
1632 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1633 if (!rec->opts.nr_cblocks)
1634 rec->opts.nr_cblocks = nr_cblocks_default;
1635 }
1636#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001637
Yisheng Xiecff17202018-03-12 19:25:57 +08001638 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001639}
1640
/* Mapping from a user-supplied clock name to its clockid value. */
struct clockid_map {
	const char *name;
	int clockid;
};

/* Shorthand for one clockids[] table entry. */
#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

/* Sentinel terminating the clockids[] table. */
#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

/* Clock names accepted by the -k/--clockid option (see parse_clockid()). */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1683
Alexey Budankovcf790512018-10-09 17:36:24 +03001684static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1685{
1686 struct timespec res;
1687
1688 *res_ns = 0;
1689 if (!clock_getres(clk_id, &res))
1690 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1691 else
1692 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1693
1694 return 0;
1695}
1696
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001697static int parse_clockid(const struct option *opt, const char *str, int unset)
1698{
1699 struct record_opts *opts = (struct record_opts *)opt->value;
1700 const struct clockid_map *cm;
1701 const char *ostr = str;
1702
1703 if (unset) {
1704 opts->use_clockid = 0;
1705 return 0;
1706 }
1707
1708 /* no arg passed */
1709 if (!str)
1710 return 0;
1711
1712 /* no setting it twice */
1713 if (opts->use_clockid)
1714 return -1;
1715
1716 opts->use_clockid = true;
1717
1718 /* if its a number, we're done */
1719 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001720 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001721
1722 /* allow a "CLOCK_" prefix to the name */
1723 if (!strncasecmp(str, "CLOCK_", 6))
1724 str += 6;
1725
1726 for (cm = clockids; cm->name; cm++) {
1727 if (!strcasecmp(str, cm->name)) {
1728 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001729 return get_clockid_res(opts->clockid,
1730 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001731 }
1732 }
1733
1734 opts->use_clockid = false;
1735 ui__warning("unknown clockid %s, check man page\n", ostr);
1736 return -1;
1737}
1738
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001739static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1740{
1741 struct record_opts *opts = (struct record_opts *)opt->value;
1742
1743 if (unset || !str)
1744 return 0;
1745
1746 if (!strcasecmp(str, "node"))
1747 opts->affinity = PERF_AFFINITY_NODE;
1748 else if (!strcasecmp(str, "cpu"))
1749 opts->affinity = PERF_AFFINITY_CPU;
1750
1751 return 0;
1752}
1753
Adrian Huntere9db1312015-04-09 18:53:46 +03001754static int record__parse_mmap_pages(const struct option *opt,
1755 const char *str,
1756 int unset __maybe_unused)
1757{
1758 struct record_opts *opts = opt->value;
1759 char *s, *p;
1760 unsigned int mmap_pages;
1761 int ret;
1762
1763 if (!str)
1764 return -EINVAL;
1765
1766 s = strdup(str);
1767 if (!s)
1768 return -ENOMEM;
1769
1770 p = strchr(s, ',');
1771 if (p)
1772 *p = '\0';
1773
1774 if (*s) {
1775 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1776 if (ret)
1777 goto out_free;
1778 opts->mmap_pages = mmap_pages;
1779 }
1780
1781 if (!p) {
1782 ret = 0;
1783 goto out_free;
1784 }
1785
1786 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1787 if (ret)
1788 goto out_free;
1789
1790 opts->auxtrace_mmap_pages = mmap_pages;
1791
1792out_free:
1793 free(s);
1794 return ret;
1795}
1796
Jiri Olsa0c582442017-01-09 10:51:59 +01001797static void switch_output_size_warn(struct record *rec)
1798{
1799 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1800 struct switch_output *s = &rec->switch_output;
1801
1802 wakeup_size /= 2;
1803
1804 if (s->size < wakeup_size) {
1805 char buf[100];
1806
1807 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1808 pr_warning("WARNING: switch-output data size lower than "
1809 "wakeup kernel buffer size (%s) "
1810 "expect bigger perf.data sizes\n", buf);
1811 }
1812}
1813
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001814static int switch_output_setup(struct record *rec)
1815{
1816 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001817 static struct parse_tag tags_size[] = {
1818 { .tag = 'B', .mult = 1 },
1819 { .tag = 'K', .mult = 1 << 10 },
1820 { .tag = 'M', .mult = 1 << 20 },
1821 { .tag = 'G', .mult = 1 << 30 },
1822 { .tag = 0 },
1823 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001824 static struct parse_tag tags_time[] = {
1825 { .tag = 's', .mult = 1 },
1826 { .tag = 'm', .mult = 60 },
1827 { .tag = 'h', .mult = 60*60 },
1828 { .tag = 'd', .mult = 60*60*24 },
1829 { .tag = 0 },
1830 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001831 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001832
1833 if (!s->set)
1834 return 0;
1835
1836 if (!strcmp(s->str, "signal")) {
1837 s->signal = true;
1838 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001839 goto enabled;
1840 }
1841
1842 val = parse_tag_value(s->str, tags_size);
1843 if (val != (unsigned long) -1) {
1844 s->size = val;
1845 pr_debug("switch-output with %s size threshold\n", s->str);
1846 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001847 }
1848
Jiri Olsabfacbe32017-01-09 10:52:00 +01001849 val = parse_tag_value(s->str, tags_time);
1850 if (val != (unsigned long) -1) {
1851 s->time = val;
1852 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1853 s->str, s->time);
1854 goto enabled;
1855 }
1856
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001857 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001858
1859enabled:
1860 rec->timestamp_filename = true;
1861 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01001862
1863 if (s->size && !rec->opts.no_buffering)
1864 switch_output_size_warn(rec);
1865
Jiri Olsadc0c6122017-01-09 10:51:58 +01001866 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001867}
1868
/* Usage strings shown by 'perf record -h' and on option errors. */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
/* Non-static alias so the usage text can be referenced outside this file. */
const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001875
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001876/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001877 * XXX Ideally would be local to cmd_record() and passed to a record__new
1878 * because we need to have access to it in record__exit, that is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001879 * after cmd_record() exits, but since record_options need to be accessible to
1880 * builtin-script, leave it here.
1881 *
1882 * At least we don't ouch it in all the other functions here directly.
1883 *
1884 * Just say no to tons of global variables, sigh.
1885 */
/*
 * Global record session state: default option values plus the perf_tool
 * callbacks invoked while processing events. Kept file-global so the
 * options remain accessible to other builtins.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.mmap_flush          = MMAP_FLUSH_DEFAULT,
	},
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001910
/* --call-graph help text; frame pointers ("fp") are the default method. */
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/* Set by --dry-run: parse the options, then exit without recording. */
static bool dry_run;
1915
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001916/*
1917 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1918 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001919 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001920 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1921 * using pipes, etc.
1922 */
Jiri Olsaefd21302017-01-03 09:19:55 +01001923static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001924 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001925 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001926 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001927 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001928 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001929 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1930 NULL, "don't record events from perf itself",
1931 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09001932 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001933 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001934 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001935 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001936 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001937 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001938 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03001939 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001940 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02001941 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001942 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001943 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001944 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001945 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001946 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001947 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02001948 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02001949 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1950 &record.opts.no_inherit_set,
1951 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00001952 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1953 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00001954 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07001955 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03001956 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
1957 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03001958 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
1959 "profile at this frequency",
1960 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03001961 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1962 "number of mmap data pages and AUX area tracing mmap pages",
1963 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03001964 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
1965 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
1966 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001967 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08001968 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001969 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001970 NULL, "enables call-graph recording" ,
1971 &record_callchain_opt),
1972 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09001973 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001974 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10001975 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02001976 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001977 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001978 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001979 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02001980 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04001981 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
1982 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02001983 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03001984 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1985 &record.opts.sample_time_set,
1986 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01001987 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
1988 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001989 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001990 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00001991 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1992 &record.no_buildid_cache_set,
1993 "do not update the buildid cache"),
1994 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1995 &record.no_buildid_set,
1996 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001997 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02001998 "monitor event in cgroup name only",
1999 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002000 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002001 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002002 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2003 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002004
2005 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2006 "branch any", "sample any taken branches",
2007 parse_branch_stack),
2008
2009 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2010 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002011 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002012 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2013 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002014 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2015 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002016 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2017 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002018 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2019 "sample selected machine registers on interrupt,"
2020 " use -I ? to list register names", parse_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002021 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2022 "sample selected machine registers on interrupt,"
2023 " use -I ? to list register names", parse_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002024 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2025 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002026 OPT_CALLBACK('k', "clockid", &record.opts,
2027 "clockid", "clockid to use for events, see clock_gettime()",
2028 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002029 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2030 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002031 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002032 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302033 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2034 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002035 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2036 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002037 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2038 "Configure all used events to run in kernel space.",
2039 PARSE_OPT_EXCLUSIVE),
2040 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2041 "Configure all used events to run in user space.",
2042 PARSE_OPT_EXCLUSIVE),
Wang Nan71dc23262015-10-14 12:41:19 +00002043 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2044 "clang binary to use for compiling BPF scriptlets"),
2045 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2046 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002047 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2048 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002049 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2050 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002051 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2052 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002053 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2054 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002055 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002056 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2057 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002058 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002059 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2060 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002061 OPT_BOOLEAN(0, "dry-run", &dry_run,
2062 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002063#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002064 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2065 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002066 record__aio_parse),
2067#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002068 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2069 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2070 record__parse_affinity),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002071 OPT_END()
2072};
2073
Namhyung Kime5b2c202014-10-23 00:15:46 +09002074struct option *record_options = __record_options;
2075
/*
 * Entry point for 'perf record': parse options, validate the target and
 * event list, configure auxiliary features (auxtrace, AIO, affinity,
 * switch-output, buildid policy), then hand off to __cmd_record() for the
 * actual recording session. Returns 0 on success or a negative errno-style
 * error code.
 */
int cmd_record(int argc, const char **argv)
{
	int err;
	struct record *rec = &record;	/* file-scope singleton holding all record state */
	char errbuf[BUFSIZ];

	setlocale(LC_ALL, "");

	/*
	 * When libbpf is compiled out, mark the BPF-related options as
	 * unavailable so the user gets a helpful build hint instead of a
	 * silent failure.
	 */
#ifndef HAVE_LIBBPF_SUPPORT
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
	set_nobuild('\0', "clang-path", true);
	set_nobuild('\0', "clang-opt", true);
# undef set_nobuild
#endif

	/*
	 * BPF prologue support needs both DWARF and libbpf; report the first
	 * missing prerequisite as the reason the option is unavailable.
	 */
#ifndef HAVE_BPF_PROLOGUE
# if !defined (HAVE_DWARF_SUPPORT)
#  define REASON  "NO_DWARF=1"
# elif !defined (HAVE_LIBBPF_SUPPORT)
#  define REASON  "NO_LIBBPF=1"
# else
#  define REASON  "this architecture doesn't support BPF prologue"
# endif
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
	set_nobuild('\0', "vmlinux", true);
# undef set_nobuild
# undef REASON
#endif

	/* Default affinity: empty mask, system-managed placement. */
	CPU_ZERO(&rec->affinity_mask);
	rec->opts.affinity = PERF_AFFINITY_SYS;

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	/* Apply perfconfig file settings before command-line options. */
	err = perf_config(perf_record_config, rec);
	if (err)
		return err;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (quiet)
		perf_quiet_option();

	/* Make system wide (-a) the default target. */
	if (!argc && target__none(&rec->opts.target))
		rec->opts.target.system_wide = true;

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");

	}
	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		return -EINVAL;
	}

	if (switch_output_setup(rec)) {
		parse_options_usage(record_usage, record_options, "switch-output", 0);
		return -EINVAL;
	}

	/* --switch-output=time: arm a periodic SIGALRM to rotate files. */
	if (rec->switch_output.time) {
		signal(SIGALRM, alarm_sig_handler);
		alarm(rec->switch_output.time);
	}

	if (rec->switch_output.num_files) {
		rec->switch_output.filenames = calloc(sizeof(char *),
						      rec->switch_output.num_files);
		if (!rec->switch_output.filenames)
			return -EINVAL;
	}

	/*
	 * Allow aliases to facilitate the lookup of symbols for address
	 * filters. Refer to auxtrace_parse_filters().
	 */
	symbol_conf.allow_aliases = true;

	symbol__init(NULL);

	/* From here on, failures must go through 'out' to release resources. */
	err = record__auxtrace_init(rec);
	if (err)
		goto out;

	if (dry_run)
		goto out;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n",
		       errbuf);
		goto out;
	}

	/* Default error for the bare 'goto out' paths below. */
	err = -ENOMEM;

	if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid) {
		disable_buildid_cache();
	} else if (rec->switch_output.enabled) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * generation by default to reduce data file switching
		 * overhead. Still generate buildid if they are required
		 * explicitly using
		 *
		 *  perf record --switch-output --no-no-buildid \
		 *              --no-no-buildid-cache
		 *
		 * Following code equals to:
		 *
		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 *         disable_buildid_cache();
		 */
		bool disable = true;

		if (rec->no_buildid_set && !rec->no_buildid)
			disable = false;
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
			disable = false;
		if (disable) {
			rec->no_buildid = true;
			rec->no_buildid_cache = true;
			disable_buildid_cache();
		}
	}

	if (record.opts.overwrite)
		record.opts.tail_synthesize = true;

	/* No events given on the command line: fall back to the default event. */
	if (rec->evlist->nr_entries == 0 &&
	    __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	/* Target validation failure is only a warning, not fatal. */
	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;	/* ui__error() may clobber errno */

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out;
	}

	/* Enable ignoring missing threads when -u/-p option is defined. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out;

	/*
	 * We take all buildids when the file contains
	 * AUX area tracing data because we do not decode the
	 * trace because it would take too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out;
	}

	/* Clamp the AIO control block count to the supported maximum. */
	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	if (verbose > 0)
		pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);

	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

	err = __cmd_record(&record, argc, argv);
out:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002288
2289static void snapshot_sig_handler(int sig __maybe_unused)
2290{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002291 struct record *rec = &record;
2292
Wang Nan5f9cf592016-04-20 18:59:49 +00002293 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2294 trigger_hit(&auxtrace_snapshot_trigger);
2295 auxtrace_record__snapshot_started = 1;
2296 if (auxtrace_record__snapshot_start(record.itr))
2297 trigger_error(&auxtrace_snapshot_trigger);
2298 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002299
Jiri Olsadc0c6122017-01-09 10:51:58 +01002300 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002301 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002302}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002303
2304static void alarm_sig_handler(int sig __maybe_unused)
2305{
2306 struct record *rec = &record;
2307
2308 if (switch_output_time(rec))
2309 trigger_hit(&switch_output_trigger);
2310}