// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "asm/bug.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
	char		 **filenames;
	int		 num_files;
	int		 cur_file;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct evlist		*evlist;
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
	cpu_set_t		affinity_mask;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};

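/*
 * Usage sketch added for exposition, not part of the original file:
 * affinity_tags[] maps the PERF_AFFINITY_* modes to the strings shown in
 * debug output.  The modes back perf record's --affinity option (assuming
 * the documented option spelling), e.g.:
 *
 *   perf record --affinity=node -e cycles -- ./workload
 *
 * With "node" or "cpu", record__adjust_affinity() below moves the tool
 * thread onto the mask of the kernel buffer being read, so reads happen
 * close to that buffer's memory.
 */
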
static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}
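
/*
 * Illustrative note, not part of the original file: the three predicates
 * above correspond to the forms of the --switch-output option (hedged,
 * from the option's documented behavior), e.g.:
 *
 *   perf record --switch-output          # rotate output on SIGUSR2
 *   perf record --switch-output=100M     # rotate after ~100MB written
 *   perf record --switch-output=30s      # rotate every 30 seconds
 *
 * switch_output_size() is checked from the write paths (record__write()
 * and record__aio_push()); the signal and time forms are driven from the
 * main loop via switch_output_trigger, the time form through the
 * alarm_sig_handler() declared later in this file.
 */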

static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);

#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}

static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(md);
		rc = 1;
	} else {
		/*
		 * An aio write request may require a restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}

static int record__aio_sync(struct perf_mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * The started aio write is not complete yet,
				 * so it has to be waited on before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
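
/*
 * Summary comment added for exposition (not in the original source): the
 * AIO path cycles each map's control blocks through three functions.
 * record__aio_push() picks a free cblock via record__aio_sync(map, false)
 * and queues a write with record__aio_write(); record__aio_complete()
 * reaps a finished request and restarts it with the remainder if only
 * part of the chunk was written; record__aio_sync(map, true) drains
 * everything, as done from record__aio_mmap_read_sync() before the
 * output file is finalized.
 */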

struct record_aio {
	struct record	*rec;
	void		*data;
	size_t		size;
};

static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->base data pointed to by buf is copied into the free
	 * map->aio.data[] buffer to release space in the kernel buffer as
	 * fast as possible, via perf_mmap__consume() called from
	 * perf_mmap__push().
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are
	 * handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling
	 * data crosses the upper bound of the kernel buffer. In this case
	 * we first move part of the data from map->start till the upper
	 * bound and then the remainder from the beginning of the kernel
	 * buffer till the end of the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     perf_mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard the map->aio.data[] buffer
		 * from premature deallocation, because the map object can be
		 * released before the aio write request started on the
		 * map->aio.data[] buffer completes.
		 *
		 * perf_mmap__put() is done at record__aio_complete() after
		 * the started aio request completes, or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(map);
	}

	aio->size += size;

	return size;
}

static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait until a map->aio.data[] buffer
	 * becomes available after the previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement the map->refcount incremented in
		 * record__aio_pushfn() if the record__aio_write() operation
		 * failed to start; otherwise map->refcount is decremented in
		 * record__aio_complete() after the aio write operation
		 * finishes successfully.
		 */
		perf_mmap__put(map);
	}

	return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}

static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct perf_mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &maps[i];

		if (map->base)
			record__aio_sync(map, true);
	}
}

static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
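
/*
 * Usage sketch, not part of the original file: this parser backs the
 * --aio option, e.g.:
 *
 *   perf record --aio   -e cycles -- ./workload    # one control block
 *   perf record --aio=4 -e cycles -- ./workload    # several in flight
 *
 * nr_cblocks_max above suggests the value is capped at 4 when the option
 * is applied later in this file (outside this excerpt).
 */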
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif

static int record__aio_enabled(struct record *rec)
{
	return rec->opts.nr_cblocks > 0;
}

#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag  = 'B', .mult = 1       },
			{ .tag  = 'K', .mult = 1 << 10 },
			{ .tag  = 'M', .mult = 1 << 20 },
			{ .tag  = 'G', .mult = 1 << 30 },
			{ .tag  = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = perf_evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
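
/*
 * Usage sketch, not part of the original file: the parser above backs
 * --mmap-flush, which sets the minimum number of bytes accumulated in a
 * kernel buffer before it is flushed to perf.data, e.g.:
 *
 *   perf record --mmap-flush=48     # 48 bytes
 *   perf record --mmap-flush=1M     # tags B/K/M/G from tags[] above
 *
 * Per the code above, the value is clamped to a quarter of the mmap size
 * and defaults to MMAP_FLUSH_DEFAULT (1 byte, i.e. flush as soon as any
 * data is available).
 */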

#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
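
/*
 * Usage sketch, not part of the original file: this parser backs the
 * -z/--compression-level option, e.g.:
 *
 *   perf record -z -- ./workload                    # comp_level_default (1)
 *   perf record --compression-level=8 -- ./workload # explicit zstd level
 *
 * comp_level_max below (22) matches the maximum zstd compression level.
 */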
#endif
static unsigned int comp_level_max = 22;

static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
		bf   = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    struct perf_mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct perf_mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones asked by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		pos = perf_evlist__first(evlist);
		pos->tracking = 0;
		pos = perf_evlist__last(evlist);
		pos->tracking = 1;
		pos->core.attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process it'll load the kernel map and replace
	 * dso->long_name with the real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory):
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples. But if timestamp_boundary
	 * is enabled, it still needs to walk all samples to get the
	 * timestamps of the first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record & report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a preload of the dso, because default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This method is used to avoid missing symbols
	 * when the first addr is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
		CPU_ZERO(&rec->affinity_mask);
		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
	}
}

static size_t process_comp_header(void *record, size_t increment)
{
	struct compressed_event *event = record;
	size_t size = sizeof(*event);

	if (increment) {
		event->header.size += increment;
		return increment;
	}

	event->header.type = PERF_RECORD_COMPRESSED;
	event->header.size = size;

	return size;
}

static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size)
{
	size_t compressed;
	size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct compressed_event) - 1;

	compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
						     max_record_size, process_comp_header);

	session->bytes_transferred += src_size;
	session->bytes_compressed  += compressed;

	return compressed;
}

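/*
 * Exposition, not in the original source: zstd_compress_stream_to_records()
 * splits the compressed stream into chunks no larger than max_record_size
 * and lets process_comp_header() above stamp each chunk with a
 * PERF_RECORD_COMPRESSED header, so the output is a sequence of
 * self-describing records.  bytes_transferred/bytes_compressed feed the
 * compression-ratio summary printed when recording ends.
 */
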
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		u64 flush = 0;
		struct perf_mmap *map = &maps[i];

		if (map->base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				flush = map->flush;
				map->flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec, bool synch)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->core.entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	if (!record__comp_enabled(rec))
		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data *data = &rec->data;
	int fd = perf_data__fd(data);

	if (data->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct perf_thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address);
	perf_thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
				    rec->session->header.data_offset,
				    at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in the evlist, which causes the newly created perf.data
		 * to lack map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
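
/*
 * Note added for exposition (not in the original source): the
 * num_files/cur_file bookkeeping above implements a ring of output files,
 * presumably backing the --switch-max-files option: once the limit is
 * reached, the oldest perf.data.<timestamp> file is removed before the
 * new name is stored.
 */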

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked for it by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].base)
			return evlist->mmap[0].base;
		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
			return evlist->overwrite_mmap[0].base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features work on top of them (on report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		if (have_tracepoints(&rec->evlist->core.entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
						 process_synthesized_event,
						NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
					    process_synthesized_event, opts->sample_address,
					    1);
out:
	return err;
}
1304
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001305static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001306{
David Ahern57706ab2013-11-06 11:41:34 -07001307 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001308 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001309 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001310 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001311 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001312 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001313 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001314 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001315 bool disabled = false, draining = false;
Jiri Olsa63503db2019-07-21 13:23:52 +02001316 struct evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001317 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001318 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001319
Namhyung Kim45604712014-05-12 09:47:24 +09001320 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001321 signal(SIGCHLD, sig_handler);
1322 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001323 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001324 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001325
Hari Bathinif3b36142017-03-08 02:11:43 +05301326 if (rec->opts.record_namespaces)
1327 tool->namespace_events = true;
1328
Jiri Olsadc0c6122017-01-09 10:51:58 +01001329 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001330 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001331 if (rec->opts.auxtrace_snapshot_mode)
1332 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001333 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001334 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001335 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001336 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001337 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001338
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001339 session = perf_session__new(data, false, tool);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -02001340 if (session == NULL) {
Adrien BAKffa91882014-04-18 11:00:43 +09001341 pr_err("Perf session creation failed.\n");
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001342 return -1;
1343 }
1344
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001345 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001346 rec->session = session;
1347
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001348 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1349 pr_err("Compression initialization failed.\n");
1350 return -1;
1351 }
1352
1353 session->header.env.comp_type = PERF_COMP_ZSTD;
1354 session->header.env.comp_level = rec->opts.comp_level;
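	/*
	 * Recording the compression type and level in the header env lets the
	 * reading side (e.g. perf report) set up zstd decompression before it
	 * parses any event.
	 */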
1355
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001356 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001357
Alexey Budankovcf790512018-10-09 17:36:24 +03001358 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1359 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1360
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001361 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001362 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001363 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001364 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001365 if (err < 0) {
1366 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001367 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001368 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001369 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001370 }
1371
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001372	/*
 1373	 * If we have just a single event and are sending data
 1374	 * through a pipe, we need to force ID allocation, because
 1375	 * we synthesize the event name through the pipe and need
 1376	 * the ID for that.
 1377	 */
Jiri Olsa6484d2f2019-07-21 13:24:28 +02001378 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001379 rec->opts.sample_id = true;
1380
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001381 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001382 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001383 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001384 }
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001385 session->header.env.comp_mmap_len = session->evlist->mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001386
Wang Nan8690a2a2016-02-22 09:10:32 +00001387 err = bpf__apply_obj_config();
1388 if (err) {
1389 char errbuf[BUFSIZ];
1390
1391 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1392 pr_err("ERROR: Apply config to BPF failed: %s\n",
1393 errbuf);
1394 goto out_child;
1395 }
1396
Adrian Huntercca84822015-08-19 17:29:21 +03001397 /*
1398 * Normally perf_session__new would do this, but it doesn't have the
1399 * evlist.
1400 */
1401 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1402 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1403 rec->tool.ordered_events = false;
1404 }
1405
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001406 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001407 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1408
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001409 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001410 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001411 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001412 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001413 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001414 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001415 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001416 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001417 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001418
David Ahernd3665492012-02-06 15:27:52 -07001419 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001420 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001421 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001422 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001423 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001424 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001425 }
1426
Song Liud56354d2019-03-11 22:30:51 -07001427 if (!opts->no_bpf_event)
1428 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1429
Song Liu657ee552019-03-11 22:30:50 -07001430 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1431 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1432 opts->no_bpf_event = true;
1433 }
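	/*
	 * The side-band evlist runs in its own thread and only carries meta
	 * events (here, BPF load notifications), so BPF programs loaded while
	 * recording can be symbolized later; failure is non-fatal and merely
	 * loses that annotation, as the message above says.
	 */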
1434
Wang Nan4ea648a2016-07-14 08:34:47 +00001435 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001436 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001437 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001438
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001439 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001440 struct sched_param param;
1441
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001442 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001443 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001444 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001445 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001446 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001447 }
1448 }
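	/*
	 * Roughly equivalent from the shell, assuming chrt(1) is available
	 * (illustrative only):
	 *
	 *	chrt -f <prio> perf record ...
	 *
	 * SCHED_FIFO keeps the trace-reading thread from being preempted by
	 * normal tasks, reducing the chance of losing ring-buffer data under
	 * load.
	 */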
1449
Jiri Olsa774cb492012-11-12 18:34:01 +01001450 /*
1451 * When perf is starting the traced process, all the events
1452 * (apart from group members) have enable_on_exec=1 set,
1453 * so don't spoil it by prematurely enabling them.
1454 */
Andi Kleen6619a532014-01-11 13:38:27 -08001455 if (!target__none(&opts->target) && !opts->initial_delay)
Jiri Olsa1c87f162019-07-21 13:24:08 +02001456 evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001457
Peter Zijlstra856e9662009-12-16 17:55:55 +01001458 /*
1459 * Let the child rip
1460 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001461 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001462 struct machine *machine = &session->machines.host;
Namhyung Kime5bed5642015-09-30 10:45:24 +09001463 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301464 pid_t tgid;
Namhyung Kime5bed5642015-09-30 10:45:24 +09001465
1466 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1467 if (event == NULL) {
1468 err = -ENOMEM;
1469 goto out_child;
1470 }
1471
Namhyung Kime803cf92015-09-22 09:24:55 +09001472		/*
 1473		 * Some H/W events are generated before the COMM event,
 1474		 * which is emitted during exec(), so perf script cannot
 1475		 * see the correct process name for those events.
 1476		 * Synthesize a COMM event to prevent that.
 1477		 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301478 tgid = perf_event__synthesize_comm(tool, event,
1479 rec->evlist->workload.pid,
1480 process_synthesized_event,
1481 machine);
1482 free(event);
1483
1484 if (tgid == -1)
1485 goto out_child;
1486
1487 event = malloc(sizeof(event->namespaces) +
1488 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1489 machine->id_hdr_size);
1490 if (event == NULL) {
1491 err = -ENOMEM;
1492 goto out_child;
1493 }
1494
1495 /*
1496 * Synthesize NAMESPACES event for the command specified.
1497 */
1498 perf_event__synthesize_namespaces(tool, event,
1499 rec->evlist->workload.pid,
1500 tgid, process_synthesized_event,
1501 machine);
Namhyung Kime5bed5642015-09-30 10:45:24 +09001502 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001503
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001504 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001505 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001506
Andi Kleen6619a532014-01-11 13:38:27 -08001507 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001508 usleep(opts->initial_delay * USEC_PER_MSEC);
Jiri Olsa1c87f162019-07-21 13:24:08 +02001509 evlist__enable(rec->evlist);
Andi Kleen6619a532014-01-11 13:38:27 -08001510 }
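	/*
	 * --delay is given in milliseconds and converted via USEC_PER_MSEC,
	 * e.g. 'perf record --delay 500 -- workload' (illustrative) sleeps
	 * 500000us before enabling the events, skipping startup noise.
	 */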
1511
Wang Nan5f9cf592016-04-20 18:59:49 +00001512 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001513 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001514 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001515 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001516 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001517
Wang Nan057374642016-07-14 08:34:43 +00001518		/*
 1519		 * rec->evlist->bkw_mmap_state may be BKW_MMAP_EMPTY
 1520		 * here: when done == true and hits != rec->samples in
 1521		 * the previous round.
 1522		 *
 1523		 * perf_evlist__toggle_bkw_mmap() ensures we never
 1524		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
 1525		 */
1526 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1527 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1528
Alexey Budankov470530b2019-03-18 20:40:26 +03001529 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001530 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001531 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001532 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001533 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001534 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001535
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001536 if (auxtrace_record__snapshot_started) {
1537 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001538 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001539 record__read_auxtrace_snapshot(rec);
Wang Nan5f9cf592016-04-20 18:59:49 +00001540 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001541 pr_err("AUX area tracing snapshot failed\n");
1542 err = -1;
1543 goto out_child;
1544 }
1545 }
1546
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001547 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001548			/*
 1549			 * If switch_output_trigger is hit, the data in the
 1550			 * overwritable ring buffer should have been collected,
 1551			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
 1552			 *
 1553			 * If SIGUSR2 was raised after or during record__mmap_read_all(),
 1554			 * it didn't collect data from the overwritable ring
 1555			 * buffer. Read again.
 1556			 */
1557 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1558 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001559 trigger_ready(&switch_output_trigger);
1560
Wang Nan057374642016-07-14 08:34:43 +00001561			/*
 1562			 * Re-enable events in the overwrite ring buffer after
 1563			 * record__mmap_read_all(): we should have collected
 1564			 * the data from it.
 1565			 */
1566 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1567
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001568 if (!quiet)
1569 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1570 waking);
1571 waking = 0;
1572 fd = record__switch_output(rec, false);
1573 if (fd < 0) {
1574 pr_err("Failed to switch to new file\n");
1575 trigger_error(&switch_output_trigger);
1576 err = fd;
1577 goto out_child;
1578 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001579
1580 /* re-arm the alarm */
1581 if (rec->switch_output.time)
1582 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001583 }
1584
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001585 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001586 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001587 break;
Arnaldo Carvalho de Melof66a889d2014-08-18 17:25:59 -03001588 err = perf_evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001589			/*
 1590			 * Propagate the error only if there is one; ignore a positive
 1591			 * number of returned events and interrupted polls (EINTR).
 1592			 */
1593 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001594 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001595 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001596
1597 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1598 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001599 }
1600
Jiri Olsa774cb492012-11-12 18:34:01 +01001601		/*
 1602		 * When perf is starting the traced process, the events die
 1603		 * with the process at the end and we wait for that. Thus
 1604		 * there is no need to disable the events in this case.
 1605		 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001606 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001607 trigger_off(&auxtrace_snapshot_trigger);
Jiri Olsae74676d2019-07-21 13:24:09 +02001608 evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001609 disabled = true;
1610 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001611 }
Wang Nan5f9cf592016-04-20 18:59:49 +00001612 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001613 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001614
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001615 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001616 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001617 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001618 pr_err("Workload failed: %s\n", emsg);
1619 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001620 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001621 }
1622
Namhyung Kime3d59112015-01-29 17:06:44 +09001623 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001624 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001625
Wang Nan4ea648a2016-07-14 08:34:47 +00001626 if (target__none(&rec->opts.target))
1627 record__synthesize_workload(rec, true);
1628
Namhyung Kim45604712014-05-12 09:47:24 +09001629out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001630 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001631 record__aio_mmap_read_sync(rec);
1632
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001633 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1634 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1635 session->header.env.comp_ratio = ratio + 0.5;
1636 }
1637
Namhyung Kim45604712014-05-12 09:47:24 +09001638 if (forks) {
1639 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001640
Namhyung Kim45604712014-05-12 09:47:24 +09001641 if (!child_finished)
1642 kill(rec->evlist->workload.pid, SIGTERM);
1643
1644 wait(&exit_status);
1645
1646 if (err < 0)
1647 status = err;
1648 else if (WIFEXITED(exit_status))
1649 status = WEXITSTATUS(exit_status);
1650 else if (WIFSIGNALED(exit_status))
1651 signr = WTERMSIG(exit_status);
1652 } else
1653 status = err;
1654
Wang Nan4ea648a2016-07-14 08:34:47 +00001655 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001656 /* this will be recalculated during process_buildids() */
1657 rec->samples = 0;
1658
Wang Nanecfd7a92016-04-13 08:21:07 +00001659 if (!err) {
1660 if (!rec->timestamp_filename) {
1661 record__finish_output(rec);
1662 } else {
1663 fd = record__switch_output(rec, true);
1664 if (fd < 0) {
1665 status = fd;
1666 goto out_delete_session;
1667 }
1668 }
1669 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001670
Wang Nana0748652016-11-26 07:03:28 +00001671 perf_hooks__invoke_record_end();
1672
Namhyung Kime3d59112015-01-29 17:06:44 +09001673 if (!err && !quiet) {
1674 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001675 const char *postfix = rec->timestamp_filename ?
1676 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001677
Adrian Hunteref149c22015-04-09 18:53:45 +03001678 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001679 scnprintf(samples, sizeof(samples),
1680 " (%" PRIu64 " samples)", rec->samples);
1681 else
1682 samples[0] = '\0';
1683
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001684 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001685 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001686 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001687 if (ratio) {
1688 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1689 rec->session->bytes_transferred / 1024.0 / 1024.0,
1690 ratio);
1691 }
1692 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001693 }
1694
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001695out_delete_session:
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001696 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001697 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001698
1699 if (!opts->no_bpf_event)
1700 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001701 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001702}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001703
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001704static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001705{
Kan Liangaad2b212015-01-05 13:23:04 -05001706 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001707
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001708 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001709
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001710 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001711 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001712 callchain->dump_size);
1713}
1714
1715int record_opts__parse_callchain(struct record_opts *record,
1716 struct callchain_param *callchain,
1717 const char *arg, bool unset)
1718{
1719 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001720 callchain->enabled = !unset;
1721
1722 /* --no-call-graph */
1723 if (unset) {
1724 callchain->record_mode = CALLCHAIN_NONE;
1725 pr_debug("callchain: disabled\n");
1726 return 0;
1727 }
1728
1729 ret = parse_callchain_record_opt(arg, callchain);
1730 if (!ret) {
1731 /* Enable data address sampling for DWARF unwind. */
1732 if (callchain->record_mode == CALLCHAIN_DWARF)
1733 record->sample_address = true;
1734 callchain_debug(callchain);
1735 }
1736
1737 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001738}
1739
Kan Liangc421e802015-07-29 05:42:12 -04001740int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001741 const char *arg,
1742 int unset)
1743{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001744 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001745}
1746
Kan Liangc421e802015-07-29 05:42:12 -04001747int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001748 const char *arg __maybe_unused,
1749 int unset __maybe_unused)
1750{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001751 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001752
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001753 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001754
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001755 if (callchain->record_mode == CALLCHAIN_NONE)
1756 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001757
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001758 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001759 return 0;
1760}
1761
Jiri Olsaeb853e82014-02-03 12:44:42 +01001762static int perf_record_config(const char *var, const char *value, void *cb)
1763{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001764 struct record *rec = cb;
1765
1766 if (!strcmp(var, "record.build-id")) {
1767 if (!strcmp(value, "cache"))
1768 rec->no_buildid_cache = false;
1769 else if (!strcmp(value, "no-cache"))
1770 rec->no_buildid_cache = true;
1771 else if (!strcmp(value, "skip"))
1772 rec->no_buildid = true;
1773 else
1774 return -1;
1775 return 0;
1776 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001777 if (!strcmp(var, "record.call-graph")) {
1778 var = "call-graph.record-mode";
1779 return perf_default_config(var, value, cb);
1780 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001781#ifdef HAVE_AIO_SUPPORT
1782 if (!strcmp(var, "record.aio")) {
1783 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1784 if (!rec->opts.nr_cblocks)
1785 rec->opts.nr_cblocks = nr_cblocks_default;
1786 }
1787#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001788
Yisheng Xiecff17202018-03-12 19:25:57 +08001789 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001790}
1791
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001792struct clockid_map {
1793 const char *name;
1794 int clockid;
1795};
1796
1797#define CLOCKID_MAP(n, c) \
1798 { .name = n, .clockid = (c), }
1799
1800#define CLOCKID_END { .name = NULL, }
1801
1802
1803/*
 1804 * Add the missing ones; we need to build on many distros...
1805 */
1806#ifndef CLOCK_MONOTONIC_RAW
1807#define CLOCK_MONOTONIC_RAW 4
1808#endif
1809#ifndef CLOCK_BOOTTIME
1810#define CLOCK_BOOTTIME 7
1811#endif
1812#ifndef CLOCK_TAI
1813#define CLOCK_TAI 11
1814#endif
1815
1816static const struct clockid_map clockids[] = {
1817 /* available for all events, NMI safe */
1818 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1819 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1820
1821 /* available for some events */
1822 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1823 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1824 CLOCKID_MAP("tai", CLOCK_TAI),
1825
1826 /* available for the lazy */
1827 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1828 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1829 CLOCKID_MAP("real", CLOCK_REALTIME),
1830 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1831
1832 CLOCKID_END,
1833};
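/*
 * Example usage, handled by parse_clockid() below (illustrative):
 *
 *	perf record -k monotonic_raw ...	# by name
 *	perf record -k CLOCK_MONOTONIC_RAW ...	# "CLOCK_" prefix is allowed
 *	perf record -k 4 ...			# raw clockid number
 */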
1834
Alexey Budankovcf790512018-10-09 17:36:24 +03001835static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1836{
1837 struct timespec res;
1838
1839 *res_ns = 0;
1840 if (!clock_getres(clk_id, &res))
1841 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1842 else
1843 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1844
1845 return 0;
1846}
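/*
 * clock_getres() reports the clock's granularity; the value is stored in
 * header.env.clockid_res_ns (see __cmd_record()) so analysis tools can tell
 * how precise the recorded timestamps are. A failed query is not fatal: the
 * resolution is simply left at 0.
 */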
1847
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001848static int parse_clockid(const struct option *opt, const char *str, int unset)
1849{
1850 struct record_opts *opts = (struct record_opts *)opt->value;
1851 const struct clockid_map *cm;
1852 const char *ostr = str;
1853
1854 if (unset) {
1855 opts->use_clockid = 0;
1856 return 0;
1857 }
1858
1859 /* no arg passed */
1860 if (!str)
1861 return 0;
1862
1863 /* no setting it twice */
1864 if (opts->use_clockid)
1865 return -1;
1866
1867 opts->use_clockid = true;
1868
 1869	/* if it's a number, we're done */
1870 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001871 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001872
1873 /* allow a "CLOCK_" prefix to the name */
1874 if (!strncasecmp(str, "CLOCK_", 6))
1875 str += 6;
1876
1877 for (cm = clockids; cm->name; cm++) {
1878 if (!strcasecmp(str, cm->name)) {
1879 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001880 return get_clockid_res(opts->clockid,
1881 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001882 }
1883 }
1884
1885 opts->use_clockid = false;
1886 ui__warning("unknown clockid %s, check man page\n", ostr);
1887 return -1;
1888}
1889
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001890static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1891{
1892 struct record_opts *opts = (struct record_opts *)opt->value;
1893
1894 if (unset || !str)
1895 return 0;
1896
1897 if (!strcasecmp(str, "node"))
1898 opts->affinity = PERF_AFFINITY_NODE;
1899 else if (!strcasecmp(str, "cpu"))
1900 opts->affinity = PERF_AFFINITY_CPU;
1901
1902 return 0;
1903}
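/*
 * E.g. 'perf record --affinity=node ...' (illustrative) migrates the
 * trace-reading thread to the NUMA node cpu mask of the mmap buffer being
 * drained; the default, PERF_AFFINITY_SYS (set in cmd_record()), leaves the
 * thread's mask untouched.
 */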
1904
Adrian Huntere9db1312015-04-09 18:53:46 +03001905static int record__parse_mmap_pages(const struct option *opt,
1906 const char *str,
1907 int unset __maybe_unused)
1908{
1909 struct record_opts *opts = opt->value;
1910 char *s, *p;
1911 unsigned int mmap_pages;
1912 int ret;
1913
1914 if (!str)
1915 return -EINVAL;
1916
1917 s = strdup(str);
1918 if (!s)
1919 return -ENOMEM;
1920
1921 p = strchr(s, ',');
1922 if (p)
1923 *p = '\0';
1924
1925 if (*s) {
1926 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1927 if (ret)
1928 goto out_free;
1929 opts->mmap_pages = mmap_pages;
1930 }
1931
1932 if (!p) {
1933 ret = 0;
1934 goto out_free;
1935 }
1936
1937 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1938 if (ret)
1939 goto out_free;
1940
1941 opts->auxtrace_mmap_pages = mmap_pages;
1942
1943out_free:
1944 free(s);
1945 return ret;
1946}
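/*
 * The argument is "pages[,pages]": the first value sizes the data mmap, the
 * optional second one the AUX area tracing mmap, e.g. (illustrative):
 *
 *	perf record -m 512,128 ...
 *
 * parses to opts->mmap_pages = 512 and opts->auxtrace_mmap_pages = 128.
 */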
1947
Jiri Olsa0c582442017-01-09 10:51:59 +01001948static void switch_output_size_warn(struct record *rec)
1949{
1950 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1951 struct switch_output *s = &rec->switch_output;
1952
1953 wakeup_size /= 2;
1954
1955 if (s->size < wakeup_size) {
1956 char buf[100];
1957
1958 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
 1959		pr_warning("WARNING: switch-output data size is lower than the "
 1960			   "wakeup kernel buffer size (%s); "
 1961			   "expect bigger perf.data sizes\n", buf);
1962 }
1963}
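/*
 * Worked example for the check above, assuming the mmap size works out to
 * 512KiB: wakeup_size is then 256KiB, so '--switch-output=100K' would
 * trigger the warning, because perf may be woken up less often than the
 * requested rotation size.
 */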
1964
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001965static int switch_output_setup(struct record *rec)
1966{
1967 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01001968 static struct parse_tag tags_size[] = {
1969 { .tag = 'B', .mult = 1 },
1970 { .tag = 'K', .mult = 1 << 10 },
1971 { .tag = 'M', .mult = 1 << 20 },
1972 { .tag = 'G', .mult = 1 << 30 },
1973 { .tag = 0 },
1974 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01001975 static struct parse_tag tags_time[] = {
1976 { .tag = 's', .mult = 1 },
1977 { .tag = 'm', .mult = 60 },
1978 { .tag = 'h', .mult = 60*60 },
1979 { .tag = 'd', .mult = 60*60*24 },
1980 { .tag = 0 },
1981 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01001982 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001983
1984 if (!s->set)
1985 return 0;
1986
1987 if (!strcmp(s->str, "signal")) {
1988 s->signal = true;
1989 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01001990 goto enabled;
1991 }
1992
1993 val = parse_tag_value(s->str, tags_size);
1994 if (val != (unsigned long) -1) {
1995 s->size = val;
1996 pr_debug("switch-output with %s size threshold\n", s->str);
1997 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01001998 }
1999
Jiri Olsabfacbe32017-01-09 10:52:00 +01002000 val = parse_tag_value(s->str, tags_time);
2001 if (val != (unsigned long) -1) {
2002 s->time = val;
2003 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2004 s->str, s->time);
2005 goto enabled;
2006 }
2007
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002008 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002009
2010enabled:
2011 rec->timestamp_filename = true;
2012 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01002013
2014 if (s->size && !rec->opts.no_buffering)
2015 switch_output_size_warn(rec);
2016
Jiri Olsadc0c6122017-01-09 10:51:58 +01002017 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002018}
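/*
 * The three accepted forms, matching the tag tables above (illustrative):
 *
 *	perf record --switch-output ...		# rotate on SIGUSR2
 *	perf record --switch-output=100M ...	# rotate at a size (B/K/M/G)
 *	perf record --switch-output=30s ...	# rotate on time (s/m/h/d)
 *
 * Each form implies --timestamp-filename, so every rotated file gets a
 * unique, timestamped name.
 */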
2019
Namhyung Kime5b2c202014-10-23 00:15:46 +09002020static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02002021 "perf record [<options>] [<command>]",
2022 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002023 NULL
2024};
Namhyung Kime5b2c202014-10-23 00:15:46 +09002025const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002026
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002027/*
 2028 * XXX Ideally this would be local to cmd_record() and passed to a record__new,
 2029 * because we need to have access to it in record__exit, which is called
 2030 * after cmd_record() exits, but since record_options need to be accessible to
 2031 * builtin-script, leave it here.
 2032 *
 2033 * At least we don't touch it in all the other functions here directly.
 2034 *
 2035 * Just say no to tons of global variables, sigh.
 2036 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002037static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002038 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08002039 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002040 .mmap_pages = UINT_MAX,
2041 .user_freq = UINT_MAX,
2042 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03002043 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002044 .target = {
2045 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02002046 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002047 },
Alexey Budankov470530b2019-03-18 20:40:26 +03002048 .mmap_flush = MMAP_FLUSH_DEFAULT,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002049 },
Namhyung Kime3d59112015-01-29 17:06:44 +09002050 .tool = {
2051 .sample = process_sample_event,
2052 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03002053 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09002054 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05302055 .namespaces = perf_event__process_namespaces,
Namhyung Kime3d59112015-01-29 17:06:44 +09002056 .mmap = perf_event__process_mmap,
2057 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03002058 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09002059 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002060};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02002061
Namhyung Kim76a26542015-10-22 23:28:32 +09002062const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
2063 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03002064
Wang Nan0aab2132016-06-16 08:02:41 +00002065static bool dry_run;
2066
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002067/*
2068 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
2069 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002070 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002071 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
2072 * using pipes, etc.
2073 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002074static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002075 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002076 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002077 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002078 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002079 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002080 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2081 NULL, "don't record events from perf itself",
2082 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002083 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002084 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002085 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002086 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002087 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002088 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002089 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002090 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002091 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002092 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002093 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002094 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002095 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002096 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002097 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002098 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002099 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002100 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2101 &record.opts.no_inherit_set,
2102 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002103 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2104 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002105 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002106	OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002107 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2108 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002109 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2110 "profile at this frequency",
2111 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002112 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2113 "number of mmap data pages and AUX area tracing mmap pages",
2114 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002115 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2116 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
2117 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002118 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002119 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002120 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002121 NULL, "enables call-graph recording" ,
2122 &record_callchain_opt),
2123 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002124 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002125 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002126 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002127 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002128 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002129 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002130 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002131 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002132 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2133 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002134 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002135 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2136 &record.opts.sample_time_set,
2137 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002138 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2139 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002140 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002141 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002142 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2143 &record.no_buildid_cache_set,
2144 "do not update the buildid cache"),
2145 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2146 &record.no_buildid_set,
2147 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002148 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002149 "monitor event in cgroup name only",
2150 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002151 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002152 "ms to wait before starting measurement after program start"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002153 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2154 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002155
2156 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2157 "branch any", "sample any taken branches",
2158 parse_branch_stack),
2159
2160 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2161 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002162 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002163 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2164 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002165 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2166 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002167 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2168 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002169 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2170 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002171 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002172 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2173 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002174 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002175 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2176 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002177 OPT_CALLBACK('k', "clockid", &record.opts,
2178 "clockid", "clockid to use for events, see clock_gettime()",
2179 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002180 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2181 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002182 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002183 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302184 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2185 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002186 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2187 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002188 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2189 "Configure all used events to run in kernel space.",
2190 PARSE_OPT_EXCLUSIVE),
2191 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2192 "Configure all used events to run in user space.",
2193 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01002194 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
2195 "collect kernel callchains"),
2196 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
2197 "collect user callchains"),
Wang Nan71dc23262015-10-14 12:41:19 +00002198 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2199 "clang binary to use for compiling BPF scriptlets"),
2200 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2201 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002202 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2203 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002204 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2205 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002206 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2207 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002208 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2209 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002210 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002211 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2212 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002213 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002214 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2215 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002216 OPT_BOOLEAN(0, "dry-run", &dry_run,
2217 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002218#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002219 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2220 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002221 record__aio_parse),
2222#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002223 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2224 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2225 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002226#ifdef HAVE_ZSTD_SUPPORT
2227 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
 2228			    "n", "Compress records using the specified level (default: 1 - fastest compression, 22 - greatest compression)",
2229 record__parse_comp_level),
2230#endif
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002231 OPT_END()
2232};
2233
Namhyung Kime5b2c202014-10-23 00:15:46 +09002234struct option *record_options = __record_options;
2235
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002236int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002237{
Adrian Hunteref149c22015-04-09 18:53:45 +03002238 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002239 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002240 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002241
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002242 setlocale(LC_ALL, "");
2243
Wang Nan48e1cab2015-12-14 10:39:22 +00002244#ifndef HAVE_LIBBPF_SUPPORT
2245# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2246 set_nobuild('\0', "clang-path", true);
2247 set_nobuild('\0', "clang-opt", true);
2248# undef set_nobuild
2249#endif
2250
He Kuang7efe0e02015-12-14 10:39:23 +00002251#ifndef HAVE_BPF_PROLOGUE
2252# if !defined (HAVE_DWARF_SUPPORT)
2253# define REASON "NO_DWARF=1"
2254# elif !defined (HAVE_LIBBPF_SUPPORT)
2255# define REASON "NO_LIBBPF=1"
2256# else
2257# define REASON "this architecture doesn't support BPF prologue"
2258# endif
2259# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2260 set_nobuild('\0', "vmlinux", true);
2261# undef set_nobuild
2262# undef REASON
2263#endif
2264
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002265 CPU_ZERO(&rec->affinity_mask);
2266 rec->opts.affinity = PERF_AFFINITY_SYS;
2267
Jiri Olsa0f98b112019-07-21 13:23:55 +02002268 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002269 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002270 return -ENOMEM;
2271
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002272 err = perf_config(perf_record_config, rec);
2273 if (err)
2274 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002275
Tom Zanussibca647a2010-11-10 08:11:30 -06002276 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002277 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002278 if (quiet)
2279 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002280
2281 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002282 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002283 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002284
Namhyung Kimbea03402012-04-26 14:15:15 +09002285 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002286 usage_with_options_msg(record_usage, record_options,
2287 "cgroup monitoring only available in system-wide mode");
2288
Stephane Eranian023695d2011-02-14 11:20:01 +02002289 }
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002290
2291 if (rec->opts.comp_level != 0) {
2292 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
2293 rec->no_buildid = true;
2294 }
2295
Adrian Hunterb757bb02015-07-21 12:44:04 +03002296 if (rec->opts.record_switch_events &&
2297 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002298 ui__error("kernel does not support recording context switch events\n");
2299 parse_options_usage(record_usage, record_options, "switch-events", 0);
2300 return -EINVAL;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002301 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002302
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002303 if (switch_output_setup(rec)) {
2304 parse_options_usage(record_usage, record_options, "switch-output", 0);
2305 return -EINVAL;
2306 }
2307
Jiri Olsabfacbe32017-01-09 10:52:00 +01002308 if (rec->switch_output.time) {
2309 signal(SIGALRM, alarm_sig_handler);
2310 alarm(rec->switch_output.time);
2311 }
2312
Andi Kleen03724b22019-03-14 15:49:55 -07002313 if (rec->switch_output.num_files) {
 2314		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
 2315						      sizeof(char *));
2316 if (!rec->switch_output.filenames)
2317 return -EINVAL;
2318 }
2319
Adrian Hunter1b36c032016-09-23 17:38:39 +03002320 /*
2321 * Allow aliases to facilitate the lookup of symbols for address
2322 * filters. Refer to auxtrace_parse_filters().
2323 */
2324 symbol_conf.allow_aliases = true;
2325
2326 symbol__init(NULL);
2327
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002328 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002329 if (err)
2330 goto out;
2331
Wang Nan0aab2132016-06-16 08:02:41 +00002332 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002333 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002334
Wang Nand7888572016-04-08 15:07:24 +00002335 err = bpf__setup_stdout(rec->evlist);
2336 if (err) {
2337 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2338 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2339 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002340 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002341 }
2342
Adrian Hunteref149c22015-04-09 18:53:45 +03002343 err = -ENOMEM;
2344
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03002345 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
Arnaldo Carvalho de Melo646aaea2011-05-27 11:00:41 -03002346 pr_warning(
2347"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
2348"check /proc/sys/kernel/kptr_restrict.\n\n"
2349"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
2350"file is not found in the buildid cache or in the vmlinux path.\n\n"
2351"Samples in kernel modules won't be resolved at all.\n\n"
2352"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
2353"even with a suitable vmlinux or kallsyms file.\n\n");
Arnaldo Carvalho de Meloec80fde2011-05-26 09:53:51 -03002354
Wang Nan0c1d46a2016-04-20 18:59:52 +00002355 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002356 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002357 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002358 /*
2359 * In 'perf record --switch-output', disable buildid
2360 * generation by default to reduce data file switching
 2361		 * overhead. Still generate buildids if they are required
2362 * explicitly using
2363 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002364 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002365 * --no-no-buildid-cache
2366 *
2367 * Following code equals to:
2368 *
2369 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2370 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2371 * disable_buildid_cache();
2372 */
2373 bool disable = true;
2374
2375 if (rec->no_buildid_set && !rec->no_buildid)
2376 disable = false;
2377 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2378 disable = false;
2379 if (disable) {
2380 rec->no_buildid = true;
2381 rec->no_buildid_cache = true;
2382 disable_buildid_cache();
2383 }
2384 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002385
Wang Nan4ea648a2016-07-14 08:34:47 +00002386 if (record.opts.overwrite)
2387 record.opts.tail_synthesize = true;
2388
Jiri Olsa6484d2f2019-07-21 13:24:28 +02002389 if (rec->evlist->core.nr_entries == 0 &&
Arnaldo Carvalho de Melo4b4cd502017-07-03 13:26:32 -03002390 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002391 pr_err("Not enough memory for event selector list\n");
Adrian Hunter394c01e2016-09-23 17:38:36 +03002392 goto out;
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002393 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002394
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002395 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2396 rec->opts.no_inherit = true;
2397
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002398 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002399 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002400 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002401 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002402 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002403
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002404 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002405 if (err) {
2406 int saved_errno = errno;
2407
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002408 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002409 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002410
2411 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002412 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002413 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002414
Mengting Zhangca800062017-12-13 15:01:53 +08002415	/* Enable ignoring missing threads when the -u/-p option is given. */
2416 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002417
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002418 err = -ENOMEM;
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002419 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002420 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002421
Adrian Hunteref149c22015-04-09 18:53:45 +03002422 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2423 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002424 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002425
Namhyung Kim61566812016-01-11 22:37:09 +09002426 /*
 2427	 * We take all buildids when the file contains AUX area
 2428	 * tracing data, because we do not decode the trace: doing
 2429	 * so would take too long.
2430 */
2431 if (rec->opts.full_auxtrace)
2432 rec->buildid_all = true;
2433
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002434 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002435 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002436 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002437 }
2438
Alexey Budankov93f20c02018-11-06 12:07:19 +03002439 if (rec->opts.nr_cblocks > nr_cblocks_max)
2440 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002441 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002442
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002443 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03002444 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002445
Alexey Budankov51255a82019-03-18 20:42:19 +03002446 if (rec->opts.comp_level > comp_level_max)
2447 rec->opts.comp_level = comp_level_max;
2448 pr_debug("comp level: %d\n", rec->opts.comp_level);
2449
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002450 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002451out:
Jiri Olsac12995a2019-07-21 13:23:56 +02002452 evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002453 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002454 auxtrace_record__free(rec->itr);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002455 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002456}
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002457
2458static void snapshot_sig_handler(int sig __maybe_unused)
2459{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002460 struct record *rec = &record;
2461
Wang Nan5f9cf592016-04-20 18:59:49 +00002462 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2463 trigger_hit(&auxtrace_snapshot_trigger);
2464 auxtrace_record__snapshot_started = 1;
2465 if (auxtrace_record__snapshot_start(record.itr))
2466 trigger_error(&auxtrace_snapshot_trigger);
2467 }
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002468
Jiri Olsadc0c6122017-01-09 10:51:58 +01002469 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002470 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002471}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002472
2473static void alarm_sig_handler(int sig __maybe_unused)
2474{
2475 struct record *rec = &record;
2476
2477 if (switch_output_time(rec))
2478 trigger_hit(&switch_output_trigger);
2479}