// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/mmap.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "asm/bug.h"
#include "perf.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
	char		 **filenames;
	int		 num_files;
	int		 cur_file;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct evlist		*evlist;
	struct perf_session	*session;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
	cpu_set_t		affinity_mask;
	unsigned long		output_max_size;	/* = 0: unlimited */
};

static volatile int done;

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};

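/*
 * 'perf record --switch-output' can rotate the output file on three
 * conditions: on a signal (SIGUSR2), after a given number of written
 * bytes (size), or periodically (time).  The helpers below check which
 * condition applies once switch_output_trigger is ready.
 */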
static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool record__output_max_size_exceeded(struct record *rec)
{
	return rec->output_max_size &&
	       (rec->bytes_written >= rec->output_max_size);
}

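/*
 * record__write() is the single sink for everything that ends up in
 * perf.data: it accounts the bytes written, stops the session once the
 * --max-size limit is exceeded, and fires the switch-output trigger when
 * the size threshold for file rotation is reached.
 */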
static int record__write(struct record *rec, struct mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (record__output_max_size_exceeded(rec) && !done) {
		fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
				" stopping session ]\n",
				rec->bytes_written >> 10);
		done = 1;
	}

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);

#ifdef HAVE_AIO_SUPPORT
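/*
 * Queue one POSIX aio write for a chunk of trace data.  aio_write() is
 * retried for as long as it fails with EAGAIN (queue full); any other
 * error marks the control block as free (aio_fildes == -1) and gives up
 * on the chunk.
 */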
static int record__aio_write(struct aiocb *cblock, int trace_fd,
		void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf    = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}

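/*
 * Check whether one in-flight aio write has finished.  Returns 0 while
 * the request is still in progress, 1 once the whole chunk has been
 * written (the control block is then released); if the kernel completed
 * the write only partially, the request is restarted for the remaining
 * bytes and 0 is returned.
 */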
static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(&md->core);
		rc = 1;
	} else {
		/*
		 * An aio write request may require a restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}

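/*
 * Wait for the aio buffers attached to this mmap.  With sync_all == false
 * it returns the index of the first free control block (suspending if
 * necessary), which the caller can reuse for the next write; with
 * sync_all == true it returns only after every outstanding request has
 * completed.
 */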
static int record__aio_sync(struct mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * A started aio write is not complete yet,
				 * so it has to be waited for before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}

struct record_aio {
	struct record	*rec;
	void		*data;
	size_t		size;
};

static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->core.base data pointed to by buf is copied into a free
	 * map->aio.data[] buffer to release space in the kernel buffer as
	 * fast as possible, calling perf_mmap__consume() from the
	 * perf_mmap__push() function.
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of data from map->start till the upper bound and then the remainder
	 * from the beginning of the kernel buffer till the end of the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard map->aio.data[] buffer
		 * from premature deallocation because map object can be
		 * released earlier than aio write request started on
		 * map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(&map->core);
	}

	aio->size += size;

	return size;
}

static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait until a map->aio.data[] buffer
	 * becomes available after the previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(&map->core);
	}

	return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}

static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *map = &maps[i];

		if (map->core.base)
			record__aio_sync(map, true);
	}
}

static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif

static int record__aio_enabled(struct record *rec)
{
	return rec->opts.nr_cblocks > 0;
}

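/*
 * --mmap-flush accepts either a plain number or a value with a B/K/M/G
 * suffix.  The result is the minimum number of bytes that must accumulate
 * in a ring buffer before it is flushed to perf.data, clamped to a quarter
 * of the mmap'ed buffer size.
 */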
#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag  = 'B', .mult = 1       },
			{ .tag  = 'K', .mult = 1 << 10 },
			{ .tag  = 'M', .mult = 1 << 20 },
			{ .tag  = 'G', .mult = 1 << 30 },
			{ .tag  = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}

#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
static unsigned int comp_level_max = 22;

static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

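/*
 * Push callback for the serial (non-AIO) path: called by perf_mmap__push()
 * for each chunk read out of a ring buffer.  With -z/--compression-level
 * the chunk is compressed into the map's scratch buffer (map->data) before
 * being written out.
 */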
static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size);
		bf   = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    struct mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
		struct mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

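/*
 * Support for copying kernel object code into the output directory (e.g.
 * for the --kcore option): record__kcore_readable() probes whether
 * /proc/kcore can be opened at all, and record__kcore_copy() clones the
 * needed pieces, via kcore_copy(), into a kcore_dir created next to the
 * perf.data file.
 */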
static bool record__kcore_readable(struct machine *machine)
{
	char kcore[PATH_MAX];
	int fd;

	scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);

	fd = open(kcore, O_RDONLY);
	if (fd < 0)
		return false;

	close(fd);

	return true;
}

static int record__kcore_copy(struct machine *machine, struct perf_data *data)
{
	char from_dir[PATH_MAX];
	char kcore_dir[PATH_MAX];
	int ret;

	snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);

	ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
	if (ret)
		return ret;

	return kcore_copy(from_dir, kcore_dir);
}

static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

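/*
 * Open all events in the evlist.  Notable details: for --delay a dummy
 * event is prepended so PERF_RECORD_MMAP et al. are still captured while
 * the real events wait for enable_on_exec; on open failure the code first
 * tries perf_evsel__fallback() and, for weak groups, retries with the
 * group broken up.
 */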
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay we need to add a dummy event so that we can track
	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
	 * real events, the ones requested by the user.
	 */
	if (opts->initial_delay) {
		if (perf_evlist__add_dummy(evlist))
			return -ENOMEM;

		pos = evlist__first(evlist);
		pos->tracking = 0;
		pos = evlist__last(evlist);
		pos->tracking = 1;
		pos->core.attr.enable_on_exec = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos);
				goto try_again;
			}
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) {
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load the kernel map and replace
	 * dso->long_name with a real pathname it found. In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory):
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples. But if timestamp_boundary
	 * is enabled, we still need to walk all samples to get the
	 * timestamps of the first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record & report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a preload of the DSOs, because by default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
	 * address is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

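/*
 * With --affinity=node|cpu, migrate the recording thread onto the CPU mask
 * of the mmap it is about to flush, so the ring-buffer pages are touched
 * from a CPU close to the memory they live on.
 */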
static void record__adjust_affinity(struct record *rec, struct mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
		CPU_ZERO(&rec->affinity_mask);
		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
	}
}

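/*
 * Compressed trace data is stored inside PERF_RECORD_COMPRESSED records.
 * process_comp_header() is the callback zstd_compress_stream_to_records()
 * uses to lay down and grow each record header; zstd_compress() drives it
 * and accounts the transferred vs. compressed byte counts on the session.
 */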
static size_t process_comp_header(void *record, size_t increment)
{
	struct perf_record_compressed *event = record;
	size_t size = sizeof(*event);

	if (increment) {
		event->header.size += increment;
		return increment;
	}

	event->header.type = PERF_RECORD_COMPRESSED;
	event->header.size = size;

	return size;
}

static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size)
{
	size_t compressed;
	size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;

	compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
						     max_record_size, process_comp_header);

	session->bytes_transferred += src_size;
	session->bytes_compressed  += compressed;

	return compressed;
}

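/*
 * Read out every ring buffer of an evlist and push the data to perf.data,
 * via either the serial record__pushfn() path or the AIO path (which has
 * to track the file offset itself, since writes complete asynchronously).
 * With synch == true each map is temporarily switched to flush == 1 so
 * that even small amounts of buffered data are drained.  If anything was
 * written, a PERF_RECORD_FINISHED_ROUND event closes the round.
 */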
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		u64 flush = 0;
		struct mmap *map = &maps[i];

		if (map->core.base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				flush = map->core.flush;
				map->core.flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->core.flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->core.flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->core.flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec, bool synch)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->core.entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	if (!record__comp_enabled(rec))
		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data *data = &rec->data;
	int fd = perf_data__fd(data);

	if (data->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct perf_thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address);
	perf_thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

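/*
 * Rotate the output file for --switch-output: drain the buffers, finish
 * the current perf.data (including a tail round of synthesized events),
 * then switch to a new timestamped file.  With --switch-max-files the
 * file names are kept in a ring and the oldest one is removed.
 */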
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same size: "2015122520103046" */
	char timestamp[] = "InvalidTimestamp";

	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
				    rec->session->header.data_offset,
				    at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist, which causes the newly created perf.data to
		 * not contain map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked for it by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

Wang Nanee667f92016-06-27 10:24:05 +00001246static const struct perf_event_mmap_page *
Jiri Olsa63503db2019-07-21 13:23:52 +02001247perf_evlist__pick_pc(struct evlist *evlist)
Wang Nanee667f92016-06-27 10:24:05 +00001248{
Wang Nanb2cb6152016-07-14 08:34:39 +00001249 if (evlist) {
Jiri Olsa547740f2019-07-27 22:07:44 +02001250 if (evlist->mmap && evlist->mmap[0].core.base)
1251 return evlist->mmap[0].core.base;
1252 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
1253 return evlist->overwrite_mmap[0].core.base;
Wang Nanb2cb6152016-07-14 08:34:39 +00001254 }
Wang Nanee667f92016-06-27 10:24:05 +00001255 return NULL;
1256}
1257
Wang Nanc45628b2016-05-24 02:28:59 +00001258static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1259{
Wang Nanee667f92016-06-27 10:24:05 +00001260 const struct perf_event_mmap_page *pc;
1261
1262 pc = perf_evlist__pick_pc(rec->evlist);
1263 if (pc)
1264 return pc;
Wang Nanc45628b2016-05-24 02:28:59 +00001265 return NULL;
1266}
1267
Wang Nan4ea648a2016-07-14 08:34:47 +00001268static int record__synthesize(struct record *rec, bool tail)
Wang Nanc45c86e2016-02-26 09:32:07 +00001269{
1270 struct perf_session *session = rec->session;
1271 struct machine *machine = &session->machines.host;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001272 struct perf_data *data = &rec->data;
Wang Nanc45c86e2016-02-26 09:32:07 +00001273 struct record_opts *opts = &rec->opts;
1274 struct perf_tool *tool = &rec->tool;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001275 int fd = perf_data__fd(data);
Wang Nanc45c86e2016-02-26 09:32:07 +00001276 int err = 0;
1277
Wang Nan4ea648a2016-07-14 08:34:47 +00001278 if (rec->opts.tail_synthesize != tail)
1279 return 0;
1280
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001281 if (data->is_pipe) {
Jiri Olsaa2015512018-03-14 10:22:04 +01001282 /*
1283 * We need to synthesize events first, because some
1284		 * features work on top of them (on the report side).
1285 */
Jiri Olsa318ec182018-08-30 08:32:15 +02001286 err = perf_event__synthesize_attrs(tool, rec->evlist,
Wang Nanc45c86e2016-02-26 09:32:07 +00001287 process_synthesized_event);
1288 if (err < 0) {
1289 pr_err("Couldn't synthesize attrs.\n");
1290 goto out;
1291 }
1292
Jiri Olsaa2015512018-03-14 10:22:04 +01001293 err = perf_event__synthesize_features(tool, session, rec->evlist,
1294 process_synthesized_event);
1295 if (err < 0) {
1296 pr_err("Couldn't synthesize features.\n");
1297 return err;
1298 }
1299
Jiri Olsace9036a2019-07-21 13:24:23 +02001300 if (have_tracepoints(&rec->evlist->core.entries)) {
Wang Nanc45c86e2016-02-26 09:32:07 +00001301 /*
1302			 * FIXME: err <= 0 here actually means that
1303			 * there were no tracepoints, so it's not really
1304			 * an error, just that we don't need to
1305			 * synthesize anything.  We really should report
1306			 * this more properly and also propagate the
1307			 * errors that currently call die().
1308 */
1309 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
1310 process_synthesized_event);
1311 if (err <= 0) {
1312 pr_err("Couldn't record tracing data.\n");
1313 goto out;
1314 }
1315 rec->bytes_written += err;
1316 }
1317 }
1318
Wang Nanc45628b2016-05-24 02:28:59 +00001319 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001320 process_synthesized_event, machine);
1321 if (err)
1322 goto out;
1323
Wang Nanc45c86e2016-02-26 09:32:07 +00001324 if (rec->opts.full_auxtrace) {
1325 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1326 session, process_synthesized_event);
1327 if (err)
1328 goto out;
1329 }
1330
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001331 if (!perf_evlist__exclude_kernel(rec->evlist)) {
1332 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1333 machine);
1334 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1335 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1336 "Check /proc/kallsyms permission or run as root.\n");
Wang Nanc45c86e2016-02-26 09:32:07 +00001337
Arnaldo Carvalho de Melo6c443952017-11-14 11:03:19 -03001338 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1339 machine);
1340 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1341 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1342 "Check /proc/modules permission or run as root.\n");
1343 }
Wang Nanc45c86e2016-02-26 09:32:07 +00001344
1345 if (perf_guest) {
1346 machines__process_guests(&session->machines,
1347 perf_event__synthesize_guest_os, tool);
1348 }
1349
Andi Kleenbfd8f722017-11-17 13:42:58 -08001350 err = perf_event__synthesize_extra_attr(&rec->tool,
1351 rec->evlist,
1352 process_synthesized_event,
1353 data->is_pipe);
1354 if (err)
1355 goto out;
1356
Jiri Olsa03617c22019-07-21 13:24:42 +02001357 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
Andi Kleen373565d2017-11-17 13:42:59 -08001358 process_synthesized_event,
1359 NULL);
1360 if (err < 0) {
1361 pr_err("Couldn't synthesize thread map.\n");
1362 return err;
1363 }
1364
Jiri Olsaf72f9012019-07-21 13:24:41 +02001365 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
Andi Kleen373565d2017-11-17 13:42:59 -08001366 process_synthesized_event, NULL);
1367 if (err < 0) {
1368 pr_err("Couldn't synthesize cpu map.\n");
1369 return err;
1370 }
1371
Song Liue5416952019-03-11 22:30:41 -07001372 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
Song Liu7b612e22019-01-17 08:15:19 -08001373 machine, opts);
1374 if (err < 0)
1375 pr_warning("Couldn't synthesize bpf events.\n");
1376
Jiri Olsa03617c22019-07-21 13:24:42 +02001377 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
Wang Nanc45c86e2016-02-26 09:32:07 +00001378 process_synthesized_event, opts->sample_address,
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001379 1);
Wang Nanc45c86e2016-02-26 09:32:07 +00001380out:
1381 return err;
1382}
1383
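/*
 * The main body of 'perf record': install signal handlers, create the
 * session and output file, open and mmap the events, write the header,
 * synthesize startup metadata, then sit in the poll/read loop below
 * until the workload exits or recording is interrupted.
 */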
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001384static int __cmd_record(struct record *rec, int argc, const char **argv)
Peter Zijlstra16c8a102009-05-05 17:50:27 +02001385{
David Ahern57706ab2013-11-06 11:41:34 -07001386 int err;
Namhyung Kim45604712014-05-12 09:47:24 +09001387 int status = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001388 unsigned long waking = 0;
Zhang, Yanmin46be6042010-03-18 11:36:04 -03001389 const bool forks = argc > 0;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001390 struct perf_tool *tool = &rec->tool;
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001391 struct record_opts *opts = &rec->opts;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001392 struct perf_data *data = &rec->data;
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001393 struct perf_session *session;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001394 bool disabled = false, draining = false;
Jiri Olsa63503db2019-07-21 13:23:52 +02001395 struct evlist *sb_evlist = NULL;
Namhyung Kim42aa2762015-01-29 17:06:48 +09001396 int fd;
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001397 float ratio = 0;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001398
Namhyung Kim45604712014-05-12 09:47:24 +09001399 atexit(record__sig_exit);
Peter Zijlstraf5970552009-06-18 23:22:55 +02001400 signal(SIGCHLD, sig_handler);
1401 signal(SIGINT, sig_handler);
David Ahern804f7ac2013-05-06 12:24:23 -06001402 signal(SIGTERM, sig_handler);
Wang Nana0748652016-11-26 07:03:28 +00001403 signal(SIGSEGV, sigsegv_handler);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001404
Hari Bathinif3b36142017-03-08 02:11:43 +05301405 if (rec->opts.record_namespaces)
1406 tool->namespace_events = true;
1407
Jiri Olsadc0c6122017-01-09 10:51:58 +01001408 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001409 signal(SIGUSR2, snapshot_sig_handler);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001410 if (rec->opts.auxtrace_snapshot_mode)
1411 trigger_on(&auxtrace_snapshot_trigger);
Jiri Olsadc0c6122017-01-09 10:51:58 +01001412 if (rec->switch_output.enabled)
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001413 trigger_on(&switch_output_trigger);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001414 } else {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001415 signal(SIGUSR2, SIG_IGN);
Wang Nanc0bdc1c2016-04-13 08:21:06 +00001416 }
Peter Zijlstraf5970552009-06-18 23:22:55 +02001417
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001418 session = perf_session__new(data, false, tool);
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05301419 if (IS_ERR(session)) {
Adrien BAKffa91882014-04-18 11:00:43 +09001420 pr_err("Perf session creation failed.\n");
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05301421 return PTR_ERR(session);
Arnaldo Carvalho de Meloa9a70bb2009-11-17 01:18:11 -02001422 }
1423
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001424 fd = perf_data__fd(data);
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001425 rec->session = session;
1426
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001427 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1428 pr_err("Compression initialization failed.\n");
1429 return -1;
1430 }
1431
1432 session->header.env.comp_type = PERF_COMP_ZSTD;
1433 session->header.env.comp_level = rec->opts.comp_level;
1434
Adrian Huntereeb399b2019-10-04 11:31:21 +03001435 if (rec->opts.kcore &&
1436 !record__kcore_readable(&session->machines.host)) {
1437 pr_err("ERROR: kcore is not readable.\n");
1438 return -1;
1439 }
1440
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001441 record__init_features(rec);
Stephane Eranian330aa672012-03-08 23:47:46 +01001442
Alexey Budankovcf790512018-10-09 17:36:24 +03001443 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1444 session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1445
Arnaldo Carvalho de Melod4db3f12009-12-27 21:36:57 -02001446 if (forks) {
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001447 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001448 argv, data->is_pipe,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001449 workload_exec_failed_signal);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001450 if (err < 0) {
1451 pr_err("Couldn't run the workload!\n");
Namhyung Kim45604712014-05-12 09:47:24 +09001452 status = err;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001453 goto out_delete_session;
Jens Axboe0a5ac842009-08-12 11:18:01 +02001454 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001455 }
1456
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001457 /*
1458	 * If we have just a single event and are sending data
1459	 * through a pipe, we need to force the id allocation,
1460	 * because we synthesize the event name through the pipe
1461 * and need the id for that.
1462 */
Jiri Olsa6484d2f2019-07-21 13:24:28 +02001463 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
Jiri Olsaad46e48c2018-03-02 17:13:54 +01001464 rec->opts.sample_id = true;
1465
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001466 if (record__open(rec) != 0) {
David Ahern8d3eca22012-08-26 12:24:47 -06001467 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001468 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001469 }
Jiri Olsaf6fa4372019-08-06 15:14:05 +02001470 session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001471
Adrian Huntereeb399b2019-10-04 11:31:21 +03001472 if (rec->opts.kcore) {
1473 err = record__kcore_copy(&session->machines.host, data);
1474 if (err) {
1475 pr_err("ERROR: Failed to copy kcore\n");
1476 goto out_child;
1477 }
1478 }
1479
Wang Nan8690a2a2016-02-22 09:10:32 +00001480 err = bpf__apply_obj_config();
1481 if (err) {
1482 char errbuf[BUFSIZ];
1483
1484 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1485 pr_err("ERROR: Apply config to BPF failed: %s\n",
1486 errbuf);
1487 goto out_child;
1488 }
1489
Adrian Huntercca84822015-08-19 17:29:21 +03001490 /*
1491 * Normally perf_session__new would do this, but it doesn't have the
1492 * evlist.
1493 */
1494 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1495 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1496 rec->tool.ordered_events = false;
1497 }
1498
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001499 if (!rec->evlist->nr_groups)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001500 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1501
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001502 if (data->is_pipe) {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001503 err = perf_header__write_pipe(fd);
Tom Zanussi529870e2010-04-01 23:59:16 -05001504 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001505 goto out_child;
Jiri Olsa563aecb2013-06-05 13:35:06 +02001506 } else {
Namhyung Kim42aa2762015-01-29 17:06:48 +09001507 err = perf_session__write_header(session, rec->evlist, fd, false);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001508 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001509 goto out_child;
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02001510 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02001511
David Ahernd3665492012-02-06 15:27:52 -07001512 if (!rec->no_buildid
Robert Richtere20960c2011-12-07 10:02:55 +01001513 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
David Ahernd3665492012-02-06 15:27:52 -07001514 pr_err("Couldn't generate buildids. "
Robert Richtere20960c2011-12-07 10:02:55 +01001515 "Use --no-buildid to profile anyway.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001516 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001517 goto out_child;
Robert Richtere20960c2011-12-07 10:02:55 +01001518 }
1519
Song Liud56354d2019-03-11 22:30:51 -07001520 if (!opts->no_bpf_event)
1521 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1522
Song Liu657ee552019-03-11 22:30:50 -07001523 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1524 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1525 opts->no_bpf_event = true;
1526 }
1527
Wang Nan4ea648a2016-07-14 08:34:47 +00001528 err = record__synthesize(rec, false);
Wang Nanc45c86e2016-02-26 09:32:07 +00001529 if (err < 0)
Namhyung Kim45604712014-05-12 09:47:24 +09001530 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001531
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001532 if (rec->realtime_prio) {
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001533 struct sched_param param;
1534
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001535 param.sched_priority = rec->realtime_prio;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001536 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Arnaldo Carvalho de Melo6beba7a2009-10-21 17:34:06 -02001537 pr_err("Could not set realtime priority.\n");
David Ahern8d3eca22012-08-26 12:24:47 -06001538 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001539 goto out_child;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001540 }
1541 }
1542
Jiri Olsa774cb492012-11-12 18:34:01 +01001543 /*
1544 * When perf is starting the traced process, all the events
1545 * (apart from group members) have enable_on_exec=1 set,
1546 * so don't spoil it by prematurely enabling them.
1547 */
Andi Kleen6619a532014-01-11 13:38:27 -08001548 if (!target__none(&opts->target) && !opts->initial_delay)
Jiri Olsa1c87f162019-07-21 13:24:08 +02001549 evlist__enable(rec->evlist);
David Ahern764e16a32011-08-25 10:17:55 -06001550
Peter Zijlstra856e9662009-12-16 17:55:55 +01001551 /*
1552 * Let the child rip
1553 */
Namhyung Kime803cf92015-09-22 09:24:55 +09001554 if (forks) {
Jiri Olsa20a8a3c2018-03-07 16:50:04 +01001555 struct machine *machine = &session->machines.host;
Namhyung Kime5bed5642015-09-30 10:45:24 +09001556 union perf_event *event;
Hari Bathinie907caf2017-03-08 02:11:51 +05301557 pid_t tgid;
Namhyung Kime5bed5642015-09-30 10:45:24 +09001558
1559 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1560 if (event == NULL) {
1561 err = -ENOMEM;
1562 goto out_child;
1563 }
1564
Namhyung Kime803cf92015-09-22 09:24:55 +09001565 /*
1566		 * Some H/W events are generated before the COMM event,
1567		 * which is emitted during exec(), so perf script
1568		 * cannot see a correct process name for those events.
1569		 * Synthesize a COMM event up front to prevent that.
1570 */
Hari Bathinie907caf2017-03-08 02:11:51 +05301571 tgid = perf_event__synthesize_comm(tool, event,
1572 rec->evlist->workload.pid,
1573 process_synthesized_event,
1574 machine);
1575 free(event);
1576
1577 if (tgid == -1)
1578 goto out_child;
1579
1580 event = malloc(sizeof(event->namespaces) +
1581 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1582 machine->id_hdr_size);
1583 if (event == NULL) {
1584 err = -ENOMEM;
1585 goto out_child;
1586 }
1587
1588 /*
1589 * Synthesize NAMESPACES event for the command specified.
1590 */
1591 perf_event__synthesize_namespaces(tool, event,
1592 rec->evlist->workload.pid,
1593 tgid, process_synthesized_event,
1594 machine);
Namhyung Kime5bed5642015-09-30 10:45:24 +09001595 free(event);
Namhyung Kime803cf92015-09-22 09:24:55 +09001596
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03001597 perf_evlist__start_workload(rec->evlist);
Namhyung Kime803cf92015-09-22 09:24:55 +09001598 }
Peter Zijlstra856e9662009-12-16 17:55:55 +01001599
Andi Kleen6619a532014-01-11 13:38:27 -08001600 if (opts->initial_delay) {
Arnaldo Carvalho de Melo0693e682016-08-08 15:05:46 -03001601 usleep(opts->initial_delay * USEC_PER_MSEC);
Jiri Olsa1c87f162019-07-21 13:24:08 +02001602 evlist__enable(rec->evlist);
Andi Kleen6619a532014-01-11 13:38:27 -08001603 }
1604
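	/*
	 * Main capture loop: drain the mmap ring buffers, service the
	 * auxtrace snapshot and switch-output triggers, and otherwise block
	 * in evlist__poll() until the kernel wakes us up or we are done.
	 */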
Wang Nan5f9cf592016-04-20 18:59:49 +00001605 trigger_ready(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001606 trigger_ready(&switch_output_trigger);
Wang Nana0748652016-11-26 07:03:28 +00001607 perf_hooks__invoke_record_start();
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001608 for (;;) {
Yang Shi9f065192015-09-29 14:49:43 -07001609 unsigned long long hits = rec->samples;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001610
Wang Nan057374642016-07-14 08:34:43 +00001611 /*
1612		 * rec->evlist->bkw_mmap_state may be
1613		 * BKW_MMAP_EMPTY here: when done == true and
1614		 * hits != rec->samples in the previous round.
1615		 *
1616		 * perf_evlist__toggle_bkw_mmap() ensures we never
1617 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1618 */
1619 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1620 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1621
Alexey Budankov470530b2019-03-18 20:40:26 +03001622 if (record__mmap_read_all(rec, false) < 0) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001623 trigger_error(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001624 trigger_error(&switch_output_trigger);
David Ahern8d3eca22012-08-26 12:24:47 -06001625 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001626 goto out_child;
David Ahern8d3eca22012-08-26 12:24:47 -06001627 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001628
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001629 if (auxtrace_record__snapshot_started) {
1630 auxtrace_record__snapshot_started = 0;
Wang Nan5f9cf592016-04-20 18:59:49 +00001631 if (!trigger_is_error(&auxtrace_snapshot_trigger))
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001632 record__read_auxtrace_snapshot(rec, false);
Wang Nan5f9cf592016-04-20 18:59:49 +00001633 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03001634 pr_err("AUX area tracing snapshot failed\n");
1635 err = -1;
1636 goto out_child;
1637 }
1638 }
1639
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001640 if (trigger_is_hit(&switch_output_trigger)) {
Wang Nan057374642016-07-14 08:34:43 +00001641 /*
1642			 * If switch_output_trigger is hit, the data in the
1643			 * overwritable ring buffer should have been collected,
1644			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1645			 *
1646			 * If SIGUSR2 was raised after or during record__mmap_read_all(),
1647			 * record__mmap_read_all() didn't collect data from the
1648			 * overwritable ring buffer. Read again.
1649 */
1650 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1651 continue;
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001652 trigger_ready(&switch_output_trigger);
1653
Wang Nan057374642016-07-14 08:34:43 +00001654 /*
1655			 * Re-enable events in the overwrite ring buffer after
1656 * record__mmap_read_all(): we should have collected
1657 * data from it.
1658 */
1659 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1660
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001661 if (!quiet)
1662 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1663 waking);
1664 waking = 0;
1665 fd = record__switch_output(rec, false);
1666 if (fd < 0) {
1667 pr_err("Failed to switch to new file\n");
1668 trigger_error(&switch_output_trigger);
1669 err = fd;
1670 goto out_child;
1671 }
Jiri Olsabfacbe32017-01-09 10:52:00 +01001672
1673 /* re-arm the alarm */
1674 if (rec->switch_output.time)
1675 alarm(rec->switch_output.time);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001676 }
1677
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001678 if (hits == rec->samples) {
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001679 if (done || draining)
Peter Zijlstra649c48a2009-06-24 21:12:48 +02001680 break;
Jiri Olsa80ab2982019-08-31 22:48:33 +02001681 err = evlist__poll(rec->evlist, -1);
Jiri Olsaa5151142014-06-02 13:44:23 -04001682 /*
1683			 * Propagate the error only if there is one.  Ignore a positive
1684			 * number of returned events and interrupt errors.
1685 */
1686 if (err > 0 || (err < 0 && errno == EINTR))
Namhyung Kim45604712014-05-12 09:47:24 +09001687 err = 0;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001688 waking++;
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001689
Jiri Olsaf4009e72019-08-16 16:00:45 +02001690 if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
Arnaldo Carvalho de Melo6dcf45ef2014-08-13 11:33:59 -03001691 draining = true;
Peter Zijlstra8b412662009-09-17 19:59:05 +02001692 }
1693
Jiri Olsa774cb492012-11-12 18:34:01 +01001694 /*
1695		 * When perf is starting the traced process, the events die
1696		 * with the process at the end and we wait for that. Thus there
1697		 * is no need to disable the events in this case.
1698 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001699 if (done && !disabled && !target__none(&opts->target)) {
Wang Nan5f9cf592016-04-20 18:59:49 +00001700 trigger_off(&auxtrace_snapshot_trigger);
Jiri Olsae74676d2019-07-21 13:24:09 +02001701 evlist__disable(rec->evlist);
Jiri Olsa27119262012-11-12 18:34:02 +01001702 disabled = true;
1703 }
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001704 }
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001705
Wang Nan5f9cf592016-04-20 18:59:49 +00001706 trigger_off(&auxtrace_snapshot_trigger);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00001707 trigger_off(&switch_output_trigger);
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001708
Alexander Shishkince7b0e42019-08-06 17:41:01 +03001709 if (opts->auxtrace_snapshot_on_exit)
1710 record__auxtrace_snapshot_exit(rec);
1711
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001712 if (forks && workload_exec_errno) {
Masami Hiramatsu35550da2014-08-14 02:22:43 +00001713 char msg[STRERR_BUFSIZE];
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001714 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001715 pr_err("Workload failed: %s\n", emsg);
1716 err = -1;
Namhyung Kim45604712014-05-12 09:47:24 +09001717 goto out_child;
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001718 }
1719
Namhyung Kime3d59112015-01-29 17:06:44 +09001720 if (!quiet)
Namhyung Kim45604712014-05-12 09:47:24 +09001721 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02001722
Wang Nan4ea648a2016-07-14 08:34:47 +00001723 if (target__none(&rec->opts.target))
1724 record__synthesize_workload(rec, true);
1725
Namhyung Kim45604712014-05-12 09:47:24 +09001726out_child:
Alexey Budankov470530b2019-03-18 20:40:26 +03001727 record__mmap_read_all(rec, true);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001728 record__aio_mmap_read_sync(rec);
1729
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001730 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1731 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1732 session->header.env.comp_ratio = ratio + 0.5;
1733 }
1734
Namhyung Kim45604712014-05-12 09:47:24 +09001735 if (forks) {
1736 int exit_status;
Ingo Molnaraddc2782009-06-02 23:43:11 +02001737
Namhyung Kim45604712014-05-12 09:47:24 +09001738 if (!child_finished)
1739 kill(rec->evlist->workload.pid, SIGTERM);
1740
1741 wait(&exit_status);
1742
1743 if (err < 0)
1744 status = err;
1745 else if (WIFEXITED(exit_status))
1746 status = WEXITSTATUS(exit_status);
1747 else if (WIFSIGNALED(exit_status))
1748 signr = WTERMSIG(exit_status);
1749 } else
1750 status = err;
1751
Wang Nan4ea648a2016-07-14 08:34:47 +00001752 record__synthesize(rec, true);
Namhyung Kime3d59112015-01-29 17:06:44 +09001753 /* this will be recalculated during process_buildids() */
1754 rec->samples = 0;
1755
Wang Nanecfd7a92016-04-13 08:21:07 +00001756 if (!err) {
1757 if (!rec->timestamp_filename) {
1758 record__finish_output(rec);
1759 } else {
1760 fd = record__switch_output(rec, true);
1761 if (fd < 0) {
1762 status = fd;
1763 goto out_delete_session;
1764 }
1765 }
1766 }
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001767
Wang Nana0748652016-11-26 07:03:28 +00001768 perf_hooks__invoke_record_end();
1769
Namhyung Kime3d59112015-01-29 17:06:44 +09001770 if (!err && !quiet) {
1771 char samples[128];
Wang Nanecfd7a92016-04-13 08:21:07 +00001772 const char *postfix = rec->timestamp_filename ?
1773 ".<timestamp>" : "";
Namhyung Kime3d59112015-01-29 17:06:44 +09001774
Adrian Hunteref149c22015-04-09 18:53:45 +03001775 if (rec->samples && !rec->opts.full_auxtrace)
Namhyung Kime3d59112015-01-29 17:06:44 +09001776 scnprintf(samples, sizeof(samples),
1777 " (%" PRIu64 " samples)", rec->samples);
1778 else
1779 samples[0] = '\0';
1780
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001781 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001782 perf_data__size(data) / 1024.0 / 1024.0,
Jiri Olsa2d4f2792019-02-21 10:41:30 +01001783 data->path, postfix, samples);
Alexey Budankovd3c8c082019-03-18 20:41:02 +03001784 if (ratio) {
1785 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
1786 rec->session->bytes_transferred / 1024.0 / 1024.0,
1787 ratio);
1788 }
1789 fprintf(stderr, " ]\n");
Namhyung Kime3d59112015-01-29 17:06:44 +09001790 }
1791
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001792out_delete_session:
Alexey Budankov5d7f4112019-03-18 20:43:35 +03001793 zstd_fini(&session->zstd_data);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03001794 perf_session__delete(session);
Song Liu657ee552019-03-11 22:30:50 -07001795
1796 if (!opts->no_bpf_event)
1797 perf_evlist__stop_sb_thread(sb_evlist);
Namhyung Kim45604712014-05-12 09:47:24 +09001798 return status;
Peter Zijlstrade9ac072009-04-08 15:01:31 +02001799}
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001800
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001801static void callchain_debug(struct callchain_param *callchain)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001802{
Kan Liangaad2b212015-01-05 13:23:04 -05001803 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
Jiri Olsaa601fdf2014-02-03 12:44:43 +01001804
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001805 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001806
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001807 if (callchain->record_mode == CALLCHAIN_DWARF)
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001808 pr_debug("callchain: stack dump size %d\n",
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001809 callchain->dump_size);
1810}
1811
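/*
 * Parse the --call-graph argument, i.e. record_mode[,record_size],
 * e.g. "fp", "lbr" or "dwarf,8192".  DWARF unwinding works on the
 * sampled stack contents, so it also turns on address sampling.
 */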
1812int record_opts__parse_callchain(struct record_opts *record,
1813 struct callchain_param *callchain,
1814 const char *arg, bool unset)
1815{
1816 int ret;
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001817 callchain->enabled = !unset;
1818
1819 /* --no-call-graph */
1820 if (unset) {
1821 callchain->record_mode = CALLCHAIN_NONE;
1822 pr_debug("callchain: disabled\n");
1823 return 0;
1824 }
1825
1826 ret = parse_callchain_record_opt(arg, callchain);
1827 if (!ret) {
1828 /* Enable data address sampling for DWARF unwind. */
1829 if (callchain->record_mode == CALLCHAIN_DWARF)
1830 record->sample_address = true;
1831 callchain_debug(callchain);
1832 }
1833
1834 return ret;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001835}
1836
Kan Liangc421e802015-07-29 05:42:12 -04001837int record_parse_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001838 const char *arg,
1839 int unset)
1840{
Arnaldo Carvalho de Melo0883e822016-04-15 16:37:17 -03001841 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
Jiri Olsa26d33022012-08-07 15:20:47 +02001842}
1843
Kan Liangc421e802015-07-29 05:42:12 -04001844int record_callchain_opt(const struct option *opt,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001845 const char *arg __maybe_unused,
1846 int unset __maybe_unused)
1847{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001848 struct callchain_param *callchain = opt->value;
Kan Liangc421e802015-07-29 05:42:12 -04001849
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001850 callchain->enabled = true;
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001851
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001852 if (callchain->record_mode == CALLCHAIN_NONE)
1853 callchain->record_mode = CALLCHAIN_FP;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001854
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001855 callchain_debug(callchain);
Jiri Olsa09b0fd42013-10-26 16:25:33 +02001856 return 0;
1857}
1858
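/*
 * Handle the record.* section of perfconfig.  A minimal ~/.perfconfig
 * sketch exercising the keys handled below:
 *
 *	[record]
 *		build-id = cache	# or: no-cache, skip
 *		call-graph = dwarf	# rewritten to call-graph.record-mode
 *		aio = 4			# only with HAVE_AIO_SUPPORT
 */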
Jiri Olsaeb853e82014-02-03 12:44:42 +01001859static int perf_record_config(const char *var, const char *value, void *cb)
1860{
Namhyung Kim7a29c082015-12-15 10:49:56 +09001861 struct record *rec = cb;
1862
1863 if (!strcmp(var, "record.build-id")) {
1864 if (!strcmp(value, "cache"))
1865 rec->no_buildid_cache = false;
1866 else if (!strcmp(value, "no-cache"))
1867 rec->no_buildid_cache = true;
1868 else if (!strcmp(value, "skip"))
1869 rec->no_buildid = true;
1870 else
1871 return -1;
1872 return 0;
1873 }
Yisheng Xiecff17202018-03-12 19:25:57 +08001874 if (!strcmp(var, "record.call-graph")) {
1875 var = "call-graph.record-mode";
1876 return perf_default_config(var, value, cb);
1877 }
Alexey Budankov93f20c02018-11-06 12:07:19 +03001878#ifdef HAVE_AIO_SUPPORT
1879 if (!strcmp(var, "record.aio")) {
1880 rec->opts.nr_cblocks = strtol(value, NULL, 0);
1881 if (!rec->opts.nr_cblocks)
1882 rec->opts.nr_cblocks = nr_cblocks_default;
1883 }
1884#endif
Jiri Olsaeb853e82014-02-03 12:44:42 +01001885
Yisheng Xiecff17202018-03-12 19:25:57 +08001886 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001887}
1888
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001889struct clockid_map {
1890 const char *name;
1891 int clockid;
1892};
1893
1894#define CLOCKID_MAP(n, c) \
1895 { .name = n, .clockid = (c), }
1896
1897#define CLOCKID_END { .name = NULL, }
1898
1899
1900/*
1901 * Add the missing ones; we need to build on many distros...
1902 */
1903#ifndef CLOCK_MONOTONIC_RAW
1904#define CLOCK_MONOTONIC_RAW 4
1905#endif
1906#ifndef CLOCK_BOOTTIME
1907#define CLOCK_BOOTTIME 7
1908#endif
1909#ifndef CLOCK_TAI
1910#define CLOCK_TAI 11
1911#endif
1912
1913static const struct clockid_map clockids[] = {
1914 /* available for all events, NMI safe */
1915 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1916 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1917
1918 /* available for some events */
1919 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1920 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1921 CLOCKID_MAP("tai", CLOCK_TAI),
1922
1923 /* available for the lazy */
1924 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1925 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1926 CLOCKID_MAP("real", CLOCK_REALTIME),
1927 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1928
1929 CLOCKID_END,
1930};
1931
Alexey Budankovcf790512018-10-09 17:36:24 +03001932static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1933{
1934 struct timespec res;
1935
1936 *res_ns = 0;
1937 if (!clock_getres(clk_id, &res))
1938 *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1939 else
1940 pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1941
1942 return 0;
1943}
1944
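/*
 * Parse -k/--clockid.  Accepts a raw clockid number or a name from
 * the clockids[] table above, with an optional "CLOCK_" prefix, e.g.
 * -k mono, -k CLOCK_MONOTONIC_RAW or -k 4.
 */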
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001945static int parse_clockid(const struct option *opt, const char *str, int unset)
1946{
1947 struct record_opts *opts = (struct record_opts *)opt->value;
1948 const struct clockid_map *cm;
1949 const char *ostr = str;
1950
1951 if (unset) {
1952 opts->use_clockid = 0;
1953 return 0;
1954 }
1955
1956 /* no arg passed */
1957 if (!str)
1958 return 0;
1959
1960 /* no setting it twice */
1961 if (opts->use_clockid)
1962 return -1;
1963
1964 opts->use_clockid = true;
1965
1966	/* if it's a number, we're done */
1967 if (sscanf(str, "%d", &opts->clockid) == 1)
Alexey Budankovcf790512018-10-09 17:36:24 +03001968 return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001969
1970 /* allow a "CLOCK_" prefix to the name */
1971 if (!strncasecmp(str, "CLOCK_", 6))
1972 str += 6;
1973
1974 for (cm = clockids; cm->name; cm++) {
1975 if (!strcasecmp(str, cm->name)) {
1976 opts->clockid = cm->clockid;
Alexey Budankovcf790512018-10-09 17:36:24 +03001977 return get_clockid_res(opts->clockid,
1978 &opts->clockid_res_ns);
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001979 }
1980 }
1981
1982 opts->use_clockid = false;
1983 ui__warning("unknown clockid %s, check man page\n", ostr);
1984 return -1;
1985}
1986
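/*
 * Parse --affinity=node|cpu.  Any other value leaves opts->affinity
 * at its default, PERF_AFFINITY_SYS (set up in cmd_record()).
 */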
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03001987static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1988{
1989 struct record_opts *opts = (struct record_opts *)opt->value;
1990
1991 if (unset || !str)
1992 return 0;
1993
1994 if (!strcasecmp(str, "node"))
1995 opts->affinity = PERF_AFFINITY_NODE;
1996 else if (!strcasecmp(str, "cpu"))
1997 opts->affinity = PERF_AFFINITY_CPU;
1998
1999 return 0;
2000}
2001
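/*
 * Parse --max-size: a plain byte count or one with a B/K/M/G suffix,
 * e.g. --max-size=1G to cap the output file at one gigabyte.
 */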
Jiwei Sun6d575812019-10-22 16:09:01 +08002002static int parse_output_max_size(const struct option *opt,
2003 const char *str, int unset)
2004{
2005 unsigned long *s = (unsigned long *)opt->value;
2006 static struct parse_tag tags_size[] = {
2007 { .tag = 'B', .mult = 1 },
2008 { .tag = 'K', .mult = 1 << 10 },
2009 { .tag = 'M', .mult = 1 << 20 },
2010 { .tag = 'G', .mult = 1 << 30 },
2011 { .tag = 0 },
2012 };
2013 unsigned long val;
2014
2015 if (unset) {
2016 *s = 0;
2017 return 0;
2018 }
2019
2020 val = parse_tag_value(str, tags_size);
2021 if (val != (unsigned long) -1) {
2022 *s = val;
2023 return 0;
2024 }
2025
2026 return -1;
2027}
2028
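/*
 * Parse -m/--mmap-pages.  The value before the comma sizes the data
 * mmaps, the optional second value sizes the AUX area tracing mmaps,
 * e.g. -m 512,64.
 */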
Adrian Huntere9db1312015-04-09 18:53:46 +03002029static int record__parse_mmap_pages(const struct option *opt,
2030 const char *str,
2031 int unset __maybe_unused)
2032{
2033 struct record_opts *opts = opt->value;
2034 char *s, *p;
2035 unsigned int mmap_pages;
2036 int ret;
2037
2038 if (!str)
2039 return -EINVAL;
2040
2041 s = strdup(str);
2042 if (!s)
2043 return -ENOMEM;
2044
2045 p = strchr(s, ',');
2046 if (p)
2047 *p = '\0';
2048
2049 if (*s) {
2050 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
2051 if (ret)
2052 goto out_free;
2053 opts->mmap_pages = mmap_pages;
2054 }
2055
2056 if (!p) {
2057 ret = 0;
2058 goto out_free;
2059 }
2060
2061 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
2062 if (ret)
2063 goto out_free;
2064
2065 opts->auxtrace_mmap_pages = mmap_pages;
2066
2067out_free:
2068 free(s);
2069 return ret;
2070}
2071
Jiri Olsa0c582442017-01-09 10:51:59 +01002072static void switch_output_size_warn(struct record *rec)
2073{
Jiri Olsa9521b5f2019-07-28 12:45:35 +02002074 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
Jiri Olsa0c582442017-01-09 10:51:59 +01002075 struct switch_output *s = &rec->switch_output;
2076
2077 wakeup_size /= 2;
2078
2079 if (s->size < wakeup_size) {
2080 char buf[100];
2081
2082 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
2083		pr_warning("WARNING: switch-output data size is lower than "
2084			   "the wakeup kernel buffer size (%s); "
2085			   "expect bigger perf.data sizes\n", buf);
2086 }
2087}
2088
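/*
 * Configure --switch-output.  Three forms are accepted: "signal"
 * (rotate output on SIGUSR2), a size threshold such as
 * --switch-output=10M, or a time threshold such as --switch-output=30s.
 * All of them imply timestamped output file names.
 */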
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002089static int switch_output_setup(struct record *rec)
2090{
2091 struct switch_output *s = &rec->switch_output;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002092 static struct parse_tag tags_size[] = {
2093 { .tag = 'B', .mult = 1 },
2094 { .tag = 'K', .mult = 1 << 10 },
2095 { .tag = 'M', .mult = 1 << 20 },
2096 { .tag = 'G', .mult = 1 << 30 },
2097 { .tag = 0 },
2098 };
Jiri Olsabfacbe32017-01-09 10:52:00 +01002099 static struct parse_tag tags_time[] = {
2100 { .tag = 's', .mult = 1 },
2101 { .tag = 'm', .mult = 60 },
2102 { .tag = 'h', .mult = 60*60 },
2103 { .tag = 'd', .mult = 60*60*24 },
2104 { .tag = 0 },
2105 };
Jiri Olsadc0c6122017-01-09 10:51:58 +01002106 unsigned long val;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002107
2108 if (!s->set)
2109 return 0;
2110
2111 if (!strcmp(s->str, "signal")) {
2112 s->signal = true;
2113 pr_debug("switch-output with SIGUSR2 signal\n");
Jiri Olsadc0c6122017-01-09 10:51:58 +01002114 goto enabled;
2115 }
2116
2117 val = parse_tag_value(s->str, tags_size);
2118 if (val != (unsigned long) -1) {
2119 s->size = val;
2120 pr_debug("switch-output with %s size threshold\n", s->str);
2121 goto enabled;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002122 }
2123
Jiri Olsabfacbe32017-01-09 10:52:00 +01002124 val = parse_tag_value(s->str, tags_time);
2125 if (val != (unsigned long) -1) {
2126 s->time = val;
2127 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2128 s->str, s->time);
2129 goto enabled;
2130 }
2131
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002132 return -1;
Jiri Olsadc0c6122017-01-09 10:51:58 +01002133
2134enabled:
2135 rec->timestamp_filename = true;
2136 s->enabled = true;
Jiri Olsa0c582442017-01-09 10:51:59 +01002137
2138 if (s->size && !rec->opts.no_buffering)
2139 switch_output_size_warn(rec);
2140
Jiri Olsadc0c6122017-01-09 10:51:58 +01002141 return 0;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002142}
2143
Namhyung Kime5b2c202014-10-23 00:15:46 +09002144static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02002145 "perf record [<options>] [<command>]",
2146 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002147 NULL
2148};
Namhyung Kime5b2c202014-10-23 00:15:46 +09002149const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002150
Arnaldo Carvalho de Melo6e0a9b32019-11-14 12:15:34 -03002151static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
2152 struct perf_sample *sample, struct machine *machine)
2153{
2154 /*
2155	 * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
2156	 * so there is no need to add them twice.
2157 */
2158 if (!(event->header.misc & PERF_RECORD_MISC_USER))
2159 return 0;
2160 return perf_event__process_mmap(tool, event, sample, machine);
2161}
2162
2163static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
2164 struct perf_sample *sample, struct machine *machine)
2165{
2166 /*
2167	 * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
2168	 * so there is no need to add them twice.
2169 */
2170 if (!(event->header.misc & PERF_RECORD_MISC_USER))
2171 return 0;
2172
2173 return perf_event__process_mmap2(tool, event, sample, machine);
2174}
2175
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002176/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002177 * XXX Ideally would be local to cmd_record() and passed to a record__new
2178 * because we need to have access to it in record__exit, that is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002179 * after cmd_record() exits, but since record_options need to be accessible to
2180 * builtin-script, leave it here.
2181 *
2182 * At least we don't ouch it in all the other functions here directly.
2183 *
2184 * Just say no to tons of global variables, sigh.
2185 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002186static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002187 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08002188 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002189 .mmap_pages = UINT_MAX,
2190 .user_freq = UINT_MAX,
2191 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03002192 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002193 .target = {
2194 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02002195 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09002196 },
Alexey Budankov470530b2019-03-18 20:40:26 +03002197 .mmap_flush = MMAP_FLUSH_DEFAULT,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002198 },
Namhyung Kime3d59112015-01-29 17:06:44 +09002199 .tool = {
2200 .sample = process_sample_event,
2201 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03002202 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09002203 .comm = perf_event__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05302204 .namespaces = perf_event__process_namespaces,
Arnaldo Carvalho de Melo6e0a9b32019-11-14 12:15:34 -03002205 .mmap = build_id__process_mmap,
2206 .mmap2 = build_id__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03002207 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09002208 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002209};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02002210
Namhyung Kim76a26542015-10-22 23:28:32 +09002211const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
2212 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03002213
Wang Nan0aab2132016-06-16 08:02:41 +00002214static bool dry_run;
2215
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002216/*
2217 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
2218 * with it and switch to using the library functions in perf_evlist that came
2219 * from builtin-record.c, i.e. use record_opts,
2220 * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
2221 * using pipes, etc.
2222 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002223static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002224 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002225 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002226 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002227 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002228 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002229 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2230 NULL, "don't record events from perf itself",
2231 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002232 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002233 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002234 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002235 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002236 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002237 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002238 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002239 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002240 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002241 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002242 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002243 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002244 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002245 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002246 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002247 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002248 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002249 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2250 &record.opts.no_inherit_set,
2251 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002252 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2253 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002254 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Song Liu71184c62019-03-11 22:30:37 -07002255	OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002256 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2257 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002258 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2259 "profile at this frequency",
2260 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002261 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2262 "number of mmap data pages and AUX area tracing mmap pages",
2263 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002264 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2265 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
2266 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002267 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002268 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002269 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002270 NULL, "enables call-graph recording" ,
2271 &record_callchain_opt),
2272 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002273 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002274 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002275 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002276 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002277 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002278 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002279 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002280 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002281 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2282 "Record the sample physical addresses"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002283 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002284 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2285 &record.opts.sample_time_set,
2286 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002287 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2288 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002289 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002290 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002291 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2292 &record.no_buildid_cache_set,
2293 "do not update the buildid cache"),
2294 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2295 &record.no_buildid_set,
2296 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002297 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002298 "monitor event in cgroup name only",
2299 parse_cgroups),
Arnaldo Carvalho de Meloa6205a32014-01-14 17:58:12 -03002300 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
Andi Kleen6619a532014-01-11 13:38:27 -08002301 "ms to wait before starting measurement after program start"),
Adrian Huntereeb399b2019-10-04 11:31:21 +03002302 OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002303 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2304 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002305
2306 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2307 "branch any", "sample any taken branches",
2308 parse_branch_stack),
2309
2310 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2311 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002312 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002313 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2314 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002315 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2316 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002317 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2318 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002319 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2320 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002321 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002322 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2323 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002324 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002325 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2326 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002327 OPT_CALLBACK('k', "clockid", &record.opts,
2328 "clockid", "clockid to use for events, see clock_gettime()",
2329 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002330 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2331 "opts", "AUX area tracing Snapshot Mode", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002332 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002333 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302334 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2335 "Record namespaces events"),
Adrian Hunterb757bb02015-07-21 12:44:04 +03002336 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2337 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002338 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2339 "Configure all used events to run in kernel space.",
2340 PARSE_OPT_EXCLUSIVE),
2341 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2342 "Configure all used events to run in user space.",
2343 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01002344 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
2345 "collect kernel callchains"),
2346 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
2347 "collect user callchains"),
Wang Nan71dc23262015-10-14 12:41:19 +00002348 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2349 "clang binary to use for compiling BPF scriptlets"),
2350 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2351 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002352 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2353 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002354 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2355 "Record build-id of all DSOs regardless of hits"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002356 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2357 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002358 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2359 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002360 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002361 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2362 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002363 "signal"),
Andi Kleen03724b22019-03-14 15:49:55 -07002364 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2365 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002366 OPT_BOOLEAN(0, "dry-run", &dry_run,
2367 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002368#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002369 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2370 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002371 record__aio_parse),
2372#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002373 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2374 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2375 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002376#ifdef HAVE_ZSTD_SUPPORT
2377 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
2378			    "n", "Compress records using the specified level (default: 1 - fastest compression, 22 - greatest compression)",
2379 record__parse_comp_level),
2380#endif
Jiwei Sun6d575812019-10-22 16:09:01 +08002381 OPT_CALLBACK(0, "max-size", &record.output_max_size,
2382 "size", "Limit the maximum size of the output file", parse_output_max_size),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002383 OPT_END()
2384};
2385
Namhyung Kime5b2c202014-10-23 00:15:46 +09002386struct option *record_options = __record_options;
2387
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002388int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002389{
Adrian Hunteref149c22015-04-09 18:53:45 +03002390 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002391 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002392 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002393
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002394 setlocale(LC_ALL, "");
2395
Wang Nan48e1cab2015-12-14 10:39:22 +00002396#ifndef HAVE_LIBBPF_SUPPORT
2397# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2398 set_nobuild('\0', "clang-path", true);
2399 set_nobuild('\0', "clang-opt", true);
2400# undef set_nobuild
2401#endif
2402
He Kuang7efe0e02015-12-14 10:39:23 +00002403#ifndef HAVE_BPF_PROLOGUE
2404# if !defined (HAVE_DWARF_SUPPORT)
2405# define REASON "NO_DWARF=1"
2406# elif !defined (HAVE_LIBBPF_SUPPORT)
2407# define REASON "NO_LIBBPF=1"
2408# else
2409# define REASON "this architecture doesn't support BPF prologue"
2410# endif
2411# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2412 set_nobuild('\0', "vmlinux", true);
2413# undef set_nobuild
2414# undef REASON
2415#endif
2416
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002417 CPU_ZERO(&rec->affinity_mask);
2418 rec->opts.affinity = PERF_AFFINITY_SYS;
2419
Jiri Olsa0f98b112019-07-21 13:23:55 +02002420 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002421 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002422 return -ENOMEM;
2423
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002424 err = perf_config(perf_record_config, rec);
2425 if (err)
2426 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002427
Tom Zanussibca647a2010-11-10 08:11:30 -06002428 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002429 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002430 if (quiet)
2431 perf_quiet_option();
	/* Make system wide (-a) the default target. */
	if (!argc && target__none(&rec->opts.target))
		rec->opts.target.system_wide = true;

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");
	}

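	/*
	 * --kcore copies the kernel image alongside the recorded data, which
	 * only works with the directory flavour of perf.data, so switch the
	 * output to that layout.
	 */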
	if (rec->opts.kcore)
		rec->data.is_dir = true;

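	/*
	 * Build-id collection walks the recorded events at the end of the
	 * session; with compression enabled those records are compressed, so
	 * that final build-id pass is skipped instead.
	 */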
	if (rec->opts.comp_level != 0) {
		pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
		rec->no_buildid = true;
	}

	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		return -EINVAL;
	}

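	/*
	 * switch_output_setup() parses the --switch-output argument, which
	 * may be "signal", a size or a time; e.g. 'perf record
	 * --switch-output=1G' would rotate the output file at roughly 1GB.
	 */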
	if (switch_output_setup(rec)) {
		parse_options_usage(record_usage, record_options, "switch-output", 0);
		return -EINVAL;
	}

	if (rec->switch_output.time) {
		signal(SIGALRM, alarm_sig_handler);
		alarm(rec->switch_output.time);
	}

	if (rec->switch_output.num_files) {
		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
						      sizeof(char *));
		if (!rec->switch_output.filenames)
			return -ENOMEM;
	}

	/*
	 * Allow aliases to facilitate the lookup of symbols for address
	 * filters. Refer to auxtrace_parse_filters().
	 */
	symbol_conf.allow_aliases = true;

	symbol__init(NULL);

	err = record__auxtrace_init(rec);
	if (err)
		goto out;

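	/* --dry-run: stop after option and setup validation, record nothing. */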
	if (dry_run)
		goto out;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n", errbuf);
		goto out;
	}

	err = -ENOMEM;

	if (rec->no_buildid_cache || rec->no_buildid) {
		disable_buildid_cache();
	} else if (rec->switch_output.enabled) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * generation by default to reduce the data file switching
		 * overhead. Build-ids are still generated if they are
		 * requested explicitly using
		 *
		 * perf record --switch-output --no-no-buildid \
		 *              --no-no-buildid-cache
		 *
		 * The following code is equivalent to:
		 *
		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 *         disable_buildid_cache();
		 */
		bool disable = true;

		if (rec->no_buildid_set && !rec->no_buildid)
			disable = false;
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
			disable = false;
		if (disable) {
			rec->no_buildid = true;
			rec->no_buildid_cache = true;
			disable_buildid_cache();
		}
	}

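	/*
	 * In overwrite mode the ring buffer only keeps the most recent
	 * events, so synthesize the non-sample events (mmap, comm, ...) at
	 * the end of the session instead of at the start.
	 */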
	if (record.opts.overwrite)
		record.opts.tail_synthesize = true;

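	/* No events given on the command line: fall back to the default event. */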
	if (rec->evlist->core.nr_entries == 0 &&
	    __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out;
	}

	/* Enable ignoring missing threads when -u/-p option is defined. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out;

	/*
	 * We take all buildids when the file contains AUX area tracing data
	 * because we do not decode the trace: decoding would take too long,
	 * so we cannot know which buildids are actually needed.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out;
	}

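	/* Clamp user-supplied values to the supported maxima and log the effective settings. */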
	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);

	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

	if (rec->opts.comp_level > comp_level_max)
		rec->opts.comp_level = comp_level_max;
	pr_debug("comp level: %d\n", rec->opts.comp_level);

	err = __cmd_record(&record, argc, argv);
out:
	evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}
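
/*
 * Signal handlers: snapshot_sig_handler() runs on SIGUSR2 (installed in
 * __cmd_record()) to take an AUX area snapshot and/or trigger an output
 * file switch; alarm_sig_handler() runs on SIGALRM for time-based
 * --switch-output rotation.
 */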
static void snapshot_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
		trigger_hit(&auxtrace_snapshot_trigger);
		auxtrace_record__snapshot_started = 1;
		if (auxtrace_record__snapshot_start(record.itr))
			trigger_error(&auxtrace_snapshot_trigger);
	}

	if (switch_output_signal(rec))
		trigger_hit(&switch_output_trigger);
}

static void alarm_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (switch_output_time(rec))
		trigger_hit(&switch_output_trigger);
}