// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/mmap.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/perf_api_probe.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "util/util.h"
#include "util/pfm.h"
#include "util/clockid.h"
#include "util/pmu-hybrid.h"
#include "util/evlist-hybrid.h"
#include "asm/bug.h"
#include "perf.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#ifdef HAVE_EVENTFD_SUPPORT
#include <sys/eventfd.h>
#endif
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <linux/bitmap.h>
#include <sys/time.h>

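/*
 * State for the --switch-output option: the current output file can be
 * rotated on a signal (SIGUSR2), or when the amount of written data or
 * the elapsed time crosses a configured threshold. 'filenames' keeps
 * the ring of the last 'num_files' dump files.
 */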
struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
	char		 **filenames;
	int		 num_files;
	int		 cur_file;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct evlist		*evlist;
	struct perf_session	*session;
	struct evlist		*sb_evlist;
	pthread_t		thread_id;
	int			realtime_prio;
	bool			switch_output_event_set;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			buildid_mmap;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
	struct mmap_cpu_mask	affinity_mask;
	unsigned long		output_max_size;	/* = 0: unlimited */
	struct perf_debuginfod	debuginfod;
};

static volatile int done;

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool record__output_max_size_exceeded(struct record *rec)
{
	return rec->output_max_size &&
	       (rec->bytes_written >= rec->output_max_size);
}

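/*
 * Common write-out path for event data: accounts the bytes written,
 * stops the session once --max-size is exceeded, and fires the
 * switch-output trigger when the size threshold is reached.
 */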
static int record__write(struct record *rec, struct mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (record__output_max_size_exceeded(rec) && !done) {
		fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
				" stopping session ]\n",
				rec->bytes_written >> 10);
		done = 1;
	}

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);

#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf    = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}

static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(&md->core);
		rc = 1;
	} else {
		/*
		 * The aio write request may require a restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				  rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}

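/*
 * Wait for in-flight aio writes on this map. With sync_all, loop until
 * every control block has completed; otherwise return the index of the
 * first control block that is free for reuse.
 */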
static int record__aio_sync(struct mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * A started aio write is not complete yet,
				 * so it has to be waited for before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}

struct record_aio {
	struct record	*rec;
	void		*data;
	size_t		size;
};

static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->core.base data pointed to by buf is copied into a free
	 * map->aio.data[] buffer to release space in the kernel buffer as
	 * fast as possible, calling perf_mmap__consume() from the
	 * perf_mmap__push() function.
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of the data from map->start till the upper bound and then the
	 * remainder from the beginning of the kernel buffer till the end of
	 * the data chunk.
	 */

	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard the map->aio.data[] buffer
		 * from premature deallocation, because the map object can be
		 * released earlier than the aio write request started on the
		 * map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(&map->core);
	}

	aio->size += size;

	return size;
}

static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till map->aio.data[] buffer
	 * becomes available after previous aio write operation.
	 */

	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];
	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(&map->core);
	}

	return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}

static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *map = &maps[i];

		if (map->core.base)
			record__aio_sync(map, true);
	}
}

static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif

static int record__aio_enabled(struct record *rec)
{
	return rec->opts.nr_cblocks > 0;
}

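/*
 * Parse the --mmap-flush argument: either a plain number of bytes or a
 * value with a B/K/M/G suffix. The result is capped at a quarter of the
 * mmap buffer size.
 */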
#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag  = 'B', .mult = 1       },
			{ .tag  = 'K', .mult = 1 << 10 },
			{ .tag  = 'M', .mult = 1 << 20 },
			{ .tag  = 'G', .mult = 1 << 30 },
			{ .tag  = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}

#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
static unsigned int comp_level_max = 22;

static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

static int process_locked_synthesized_event(struct perf_tool *tool,
					    union perf_event *event,
					    struct perf_sample *sample __maybe_unused,
					    struct machine *machine __maybe_unused)
{
	static pthread_mutex_t synth_lock = PTHREAD_MUTEX_INITIALIZER;
	int ret;

	pthread_mutex_lock(&synth_lock);
	ret = process_synthesized_event(tool, event, sample, machine);
	pthread_mutex_unlock(&synth_lock);
	return ret;
}

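/*
 * Push callback for the synchronous (non-AIO) path: compress the chunk
 * into the map's scratch buffer when compression is enabled, then hand
 * it to record__write().
 */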
static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size);
		bf   = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int signr = -1;
static volatile int child_finished;
#ifdef HAVE_EVENTFD_SUPPORT
static int done_fd = -1;
#endif

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
#ifdef HAVE_EVENTFD_SUPPORT
{
	u64 tmp = 1;
	/*
	 * It is possible for this signal handler to run after done is checked
	 * in the main loop, but before the perf counter fds are polled. If this
	 * happens, the poll() will continue to wait even though done is set,
	 * and will only break out if either another signal is received, or the
	 * counters are ready for read. To ensure the poll() doesn't sleep when
	 * done is set, use an eventfd (done_fd) to wake up the poll().
	 */
	if (write(done_fd, &tmp, sizeof(tmp)) < 0)
		pr_err("failed to signal wakeup fd, error: %m\n");
}
#endif // HAVE_EVENTFD_SUPPORT
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

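/*
 * Write one AUX area trace event: for single-file (non-pipe) output,
 * record the file offset in the auxtrace index first, then emit the
 * event header, both data fragments and the padding that keeps the
 * record 8-byte aligned.
 */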
static int record__process_auxtrace(struct perf_tool *tool,
				    struct mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
		struct mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
					    rec->opts.auxtrace_sample_opts);
	if (err)
		return err;

	auxtrace_regroup_aux_output(rec->evlist);

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

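/*
 * Text poke events (kernel code modifications) are collected through a
 * software dummy event: period 1 so no modification is missed, enabled
 * immediately, and forced system-wide on all CPUs.
 */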
static int record__config_text_poke(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/* Nothing to do if text poke is already configured */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.text_poke)
			return 0;
	}

	err = parse_events(evlist, "dummy:u", NULL);
	if (err)
		return err;

	evsel = evlist__last(evlist);

	evsel->core.attr.freq = 0;
	evsel->core.attr.sample_period = 1;
	evsel->core.attr.text_poke = 1;
	evsel->core.attr.ksymbol = 1;

	evsel->core.system_wide = true;
	evsel->no_aux_samples = true;
	evsel->immediate = true;

	/* Text poke must be collected on all CPUs */
	perf_cpu_map__put(evsel->core.own_cpus);
	evsel->core.own_cpus = perf_cpu_map__new(NULL);
	perf_cpu_map__put(evsel->core.cpus);
	evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);

	evsel__set_sample_bit(evsel, TIME);

	return 0;
}

static bool record__kcore_readable(struct machine *machine)
{
	char kcore[PATH_MAX];
	int fd;

	scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);

	fd = open(kcore, O_RDONLY);
	if (fd < 0)
		return false;

	close(fd);

	return true;
}

static int record__kcore_copy(struct machine *machine, struct perf_data *data)
{
	char from_dir[PATH_MAX];
	char kcore_dir[PATH_MAX];
	int ret;

	snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);

	ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
	if (ret)
		return ret;

	return kcore_copy(from_dir, kcore_dir);
}

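/*
 * Map the per-CPU ring buffers for the whole evlist. EPERM usually
 * means the locked-memory limit was hit, so point the user at
 * perf_event_mlock_kb or a smaller -m value.
 */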
static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
				  opts->auxtrace_sample_mode;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (evlist__mmap_ex(evlist, opts->mmap_pages,
			    opts->auxtrace_mmap_pages,
			    auxtrace_overwrite,
			    opts->nr_cblocks, opts->affinity,
			    opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

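/*
 * Open all events in the evlist: set up the tracking dummy event where
 * needed, open each evsel (falling back to older attributes or breaking
 * up weak groups on failure), apply filters and mmap the ring buffers.
 */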
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay, system wide or a hybrid system, we need to add a
	 * dummy event so that we can track PERF_RECORD_MMAP to cover the delay
	 * of waiting or event synthesis.
	 */
	if (opts->initial_delay || target__has_cpu(&opts->target) ||
	    perf_pmu__has_hybrid()) {
		pos = evlist__get_tracking_event(evlist);
		if (!evsel__is_dummy_event(pos)) {
			/* Set up dummy event. */
			if (evlist__add_dummy(evlist))
				return -ENOMEM;
			pos = evlist__last(evlist);
			evlist__set_tracking_event(evlist, pos);
		}

		/*
		 * Enable the dummy event when the process is forked for
		 * initial_delay, immediately for system wide.
		 */
		if (opts->initial_delay && !pos->immediate &&
		    !target__has_cpu(&opts->target))
			pos->core.attr.enable_on_exec = 1;
		else
			pos->immediate = 1;
	}

	evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->core.leader != &pos->core &&
			    pos->weak_group) {
				pos = evlist__reset_weak_group(evlist, pos, true);
				goto try_again;
			}
			rc = -errno;
			evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (symbol_conf.kptr_restrict && !evlist__exclude_kernel(evlist)) {
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
	}

	if (evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static void set_timestamp_boundary(struct record *rec, u64 sample_time)
{
	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample_time;

	if (sample_time)
		rec->evlist->last_sample_time = sample_time;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	set_timestamp_boundary(rec, sample->time);

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load the kernel map and replace the
	 * dso->long_name with the real pathname it found. In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory):
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples. But if timestamp_boundary
	 * is enabled, it still needs to walk all samples to get the
	 * timestamps of the first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel when processing the record & report
	 * subcommands, we arrange the module mmap prior to the guest kernel
	 * mmap and trigger a preload dso, because default guest module
	 * symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This method is used to avoid missing symbols
	 * when the first addr is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

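/*
 * With --affinity=node/cpu, move the tool thread onto the CPU set that
 * backs the mmap buffer before reading it, which keeps the buffer
 * accesses NUMA-local.
 */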
static void record__adjust_affinity(struct record *rec, struct mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits,
			  rec->affinity_mask.nbits)) {
		bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits);
		bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits,
			  map->affinity_mask.bits, rec->affinity_mask.nbits);
		sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask),
				  (cpu_set_t *)rec->affinity_mask.bits);
		if (verbose == 2)
			mmap_cpu_mask__scnprintf(&rec->affinity_mask, "thread");
	}
}

static size_t process_comp_header(void *record, size_t increment)
{
	struct perf_record_compressed *event = record;
	size_t size = sizeof(*event);

	if (increment) {
		event->header.size += increment;
		return increment;
	}

	event->header.type = PERF_RECORD_COMPRESSED;
	event->header.size = size;

	return size;
}

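/*
 * Compress a chunk of trace data into one or more PERF_RECORD_COMPRESSED
 * records (framed via process_comp_header()) and account the raw vs.
 * compressed byte counts on the session.
 */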
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size)
{
	size_t compressed;
	size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;

	compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
						     max_record_size, process_comp_header);

	session->bytes_transferred += src_size;
	session->bytes_compressed  += compressed;

	return compressed;
}

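/*
 * Drain every mmap ring of the evlist through the AIO or synchronous
 * push path, then write a PERF_RECORD_FINISHED_ROUND marker if anything
 * was written. With 'synch', each map temporarily gets flush = 1 so
 * even partial chunks are pushed out.
 */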
Jiri Olsa63503db2019-07-21 13:23:52 +02001111static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
Alexey Budankov470530b2019-03-18 20:40:26 +03001112 bool overwrite, bool synch)
Frederic Weisbecker98402802010-05-02 22:05:29 +02001113{
Jiri Olsadcabb502014-07-25 16:56:16 +02001114 u64 bytes_written = rec->bytes_written;
Peter Zijlstra0e2e63d2010-05-20 14:45:26 +02001115 int i;
David Ahern8d3eca22012-08-26 12:24:47 -06001116 int rc = 0;
Jiri Olsaa5830532019-07-27 20:30:53 +02001117 struct mmap *maps;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001118 int trace_fd = rec->data.file.fd;
Alexey Budankovef781122019-03-18 20:44:12 +03001119 off_t off = 0;
Frederic Weisbecker98402802010-05-02 22:05:29 +02001120
Wang Nancb216862016-06-27 10:24:04 +00001121 if (!evlist)
1122 return 0;
Adrian Hunteref149c22015-04-09 18:53:45 +03001123
Wang Nan0b72d692017-12-04 16:51:07 +00001124 maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
Wang Nana4ea0ec2016-07-14 08:34:36 +00001125 if (!maps)
1126 return 0;
Wang Nancb216862016-06-27 10:24:04 +00001127
Wang Nan0b72d692017-12-04 16:51:07 +00001128 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
Wang Nan54cc54d2016-07-14 08:34:42 +00001129 return 0;
1130
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001131 if (record__aio_enabled(rec))
1132 off = record__aio_get_pos(trace_fd);
1133
Jiri Olsac976ee12019-07-30 13:04:59 +02001134 for (i = 0; i < evlist->core.nr_mmaps; i++) {
Alexey Budankov470530b2019-03-18 20:40:26 +03001135 u64 flush = 0;
Jiri Olsaa5830532019-07-27 20:30:53 +02001136 struct mmap *map = &maps[i];
Wang Nana4ea0ec2016-07-14 08:34:36 +00001137
Jiri Olsa547740f2019-07-27 22:07:44 +02001138 if (map->core.base) {
Alexey Budankovf13de662019-01-22 20:50:57 +03001139 record__adjust_affinity(rec, map);
Alexey Budankov470530b2019-03-18 20:40:26 +03001140 if (synch) {
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001141 flush = map->core.flush;
1142 map->core.flush = 1;
Alexey Budankov470530b2019-03-18 20:40:26 +03001143 }
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001144 if (!record__aio_enabled(rec)) {
Alexey Budankovef781122019-03-18 20:44:12 +03001145 if (perf_mmap__push(map, rec, record__pushfn) < 0) {
Alexey Budankov470530b2019-03-18 20:40:26 +03001146 if (synch)
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001147 map->core.flush = flush;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001148 rc = -1;
1149 goto out;
1150 }
1151 } else {
Alexey Budankovef781122019-03-18 20:44:12 +03001152 if (record__aio_push(rec, map, &off) < 0) {
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001153 record__aio_set_pos(trace_fd, off);
Alexey Budankov470530b2019-03-18 20:40:26 +03001154 if (synch)
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001155 map->core.flush = flush;
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001156 rc = -1;
1157 goto out;
1158 }
David Ahern8d3eca22012-08-26 12:24:47 -06001159 }
Alexey Budankov470530b2019-03-18 20:40:26 +03001160 if (synch)
Jiri Olsa65aa2e62019-08-27 16:05:18 +02001161 map->core.flush = flush;
David Ahern8d3eca22012-08-26 12:24:47 -06001162 }
Adrian Hunteref149c22015-04-09 18:53:45 +03001163
Jiri Olsae035f4c2018-09-13 14:54:05 +02001164 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
Adrian Hunterc0a6de02019-11-15 14:42:16 +02001165 !rec->opts.auxtrace_sample_mode &&
Jiri Olsae035f4c2018-09-13 14:54:05 +02001166 record__auxtrace_mmap_read(rec, map) != 0) {
Adrian Hunteref149c22015-04-09 18:53:45 +03001167 rc = -1;
1168 goto out;
1169 }
Frederic Weisbecker98402802010-05-02 22:05:29 +02001170 }
1171
Alexey Budankovd3d1af62018-11-06 12:04:58 +03001172 if (record__aio_enabled(rec))
1173 record__aio_set_pos(trace_fd, off);
1174
Jiri Olsadcabb502014-07-25 16:56:16 +02001175 /*
1176 * Mark the round finished in case we wrote
1177 * at least one event.
1178 */
1179 if (bytes_written != rec->bytes_written)
Jiri Olsaded2b8f2018-09-13 14:54:06 +02001180 rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
David Ahern8d3eca22012-08-26 12:24:47 -06001181
Wang Nan0b72d692017-12-04 16:51:07 +00001182 if (overwrite)
Arnaldo Carvalho de Meloade9d202020-11-30 09:33:55 -03001183 evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
David Ahern8d3eca22012-08-26 12:24:47 -06001184out:
1185 return rc;
Frederic Weisbecker98402802010-05-02 22:05:29 +02001186}
1187
Alexey Budankov470530b2019-03-18 20:40:26 +03001188static int record__mmap_read_all(struct record *rec, bool synch)
Wang Nancb216862016-06-27 10:24:04 +00001189{
1190 int err;
1191
Alexey Budankov470530b2019-03-18 20:40:26 +03001192 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
Wang Nancb216862016-06-27 10:24:04 +00001193 if (err)
1194 return err;
1195
Alexey Budankov470530b2019-03-18 20:40:26 +03001196 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
Wang Nancb216862016-06-27 10:24:04 +00001197}
1198
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001199static void record__init_features(struct record *rec)
David Ahern57706ab2013-11-06 11:41:34 -07001200{
David Ahern57706ab2013-11-06 11:41:34 -07001201 struct perf_session *session = rec->session;
1202 int feat;
1203
1204 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1205 perf_header__set_feat(&session->header, feat);
1206
1207 if (rec->no_buildid)
1208 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1209
Jiri Olsace9036a2019-07-21 13:24:23 +02001210 if (!have_tracepoints(&rec->evlist->core.entries))
David Ahern57706ab2013-11-06 11:41:34 -07001211 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1212
1213 if (!rec->opts.branch_stack)
1214 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
Adrian Hunteref149c22015-04-09 18:53:45 +03001215
1216 if (!rec->opts.full_auxtrace)
1217 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
Jiri Olsaffa517a2015-10-25 15:51:43 +01001218
Alexey Budankovcf790512018-10-09 17:36:24 +03001219 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1220 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
1221
Jiri Olsad1e325c2020-08-05 11:34:40 +02001222 if (!rec->opts.use_clockid)
1223 perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);
1224
Jiri Olsa258031c2019-03-08 14:47:39 +01001225 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001226 if (!record__comp_enabled(rec))
1227 perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
Jiri Olsa258031c2019-03-08 14:47:39 +01001228
Jiri Olsaffa517a2015-10-25 15:51:43 +01001229 perf_header__clear_feat(&session->header, HEADER_STAT);
David Ahern57706ab2013-11-06 11:41:34 -07001230}
1231
Wang Nane1ab48b2016-02-26 09:32:10 +00001232static void
1233record__finish_output(struct record *rec)
1234{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001235 struct perf_data *data = &rec->data;
1236 int fd = perf_data__fd(data);
Wang Nane1ab48b2016-02-26 09:32:10 +00001237
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001238 if (data->is_pipe)
Wang Nane1ab48b2016-02-26 09:32:10 +00001239 return;
1240
1241 rec->session->header.data_size += rec->bytes_written;
Jiri Olsa45112e82019-02-21 10:41:29 +01001242 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
Wang Nane1ab48b2016-02-26 09:32:10 +00001243
1244 if (!rec->no_buildid) {
1245 process_buildids(rec);
1246
1247 if (rec->buildid_all)
1248 dsos__hit_all(rec->session);
1249 }
1250 perf_session__write_header(rec->session, rec->evlist, fd, true);
1251
1252 return;
1253}
1254
static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct perf_thread_map *thread_map;
	bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						process_synthesized_event,
						&rec->session->machines.host,
						needs_mmap,
						rec->opts.sample_address);
	perf_thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

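/*
 * Rotate the output file: flush and synthesize whatever belongs to the
 * current file and close it out, then (unless called at exit) start the
 * next timestamped file and re-synthesize the tracking events it needs
 * to be parsable on its own.
 */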
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same size as "2015122520103046" */
	char timestamp[] = "InvalidTimestamp";

	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
			       rec->session->header.data_offset,
			       at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in the evlist, so the newly created perf.data wouldn't
		 * contain map and comm information.
		 * Create a fake thread_map and call
		 * perf_event__synthesize_thread_map() directly for those
		 * events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}

static volatile int workload_exec_errno;

/*
 * evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked for that by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

static const struct perf_event_mmap_page *evlist__pick_pc(struct evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].core.base)
			return evlist->mmap[0].core.base;
		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
			return evlist->overwrite_mmap[0].core.base;
	}
	return NULL;
}

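/*
 * Any mapped perf_event_mmap_page will do: the control page carries the
 * kernel's time-conversion parameters, which perf_event__synth_time_conv()
 * below turns into a PERF_RECORD_TIME_CONV event.
 */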
static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

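/*
 * Synthesize the side events that consumers of the file rely on: pipe
 * headers (when streaming), time-conversion data, id_index (which must
 * precede auxtrace_info), kernel and module maps, guest machines, extra
 * attributes, thread and cpu maps, BPF images and cgroups, and finally
 * the pre-existing threads of an attached target.
 */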
static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int err = 0;
	event_op f = process_synthesized_event;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		err = perf_event__synthesize_for_pipe(tool, session, data,
						      process_synthesized_event);
		if (err < 0)
			goto out;

		rec->bytes_written += err;
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	/* Synthesize id_index before auxtrace_info */
	if (rec->opts.auxtrace_sample_mode || rec->opts.full_auxtrace) {
		err = perf_event__synthesize_id_index(tool,
						      process_synthesized_event,
						      session->evlist, machine);
		if (err)
			goto out;
	}

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	if (!evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
						 process_synthesized_event,
						 NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	if (rec->opts.synth & PERF_SYNTH_CGROUP) {
		err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
						     machine);
		if (err < 0)
			pr_warning("Couldn't synthesize cgroup events.\n");
	}

	if (rec->opts.nr_threads_synthesize > 1) {
		perf_set_multithreaded();
		f = process_locked_synthesized_event;
	}

	if (rec->opts.synth & PERF_SYNTH_TASK) {
		bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;

		err = __machine__synthesize_threads(machine, tool, &opts->target,
						    rec->evlist->core.threads,
						    f, needs_mmap, opts->sample_address,
						    rec->opts.nr_threads_synthesize);
	}

	if (rec->opts.nr_threads_synthesize > 1)
		perf_set_singlethreaded();

out:
	return err;
}

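/*
 * Side-band event callback: poke the main thread with SIGUSR2 so it can
 * act on a --switch-output-event match.
 */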
static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
{
	struct record *rec = data;
	pthread_kill(rec->thread_id, SIGUSR2);
	return 0;
}

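/*
 * The side-band evlist is serviced by a separate thread: it carries the
 * --switch-output-event events and, with libbpf support, the dummy event
 * used to track PERF_RECORD_BPF_EVENT so BPF programs stay annotatable.
 */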
static int record__setup_sb_evlist(struct record *rec)
{
	struct record_opts *opts = &rec->opts;

	if (rec->sb_evlist != NULL) {
		/*
		 * We get here if --switch-output-event populated the
		 * sb_evlist, so associate a callback that will send a SIGUSR2
		 * to the main thread.
		 */
		evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
		rec->thread_id = pthread_self();
	}
#ifdef HAVE_LIBBPF_SUPPORT
	if (!opts->no_bpf_event) {
		if (rec->sb_evlist == NULL) {
			rec->sb_evlist = evlist__new();

			if (rec->sb_evlist == NULL) {
				pr_err("Couldn't create side band evlist.\n");
				return -1;
			}
		}

		if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
			pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n");
			return -1;
		}
	}
#endif
	if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
		opts->no_bpf_event = true;
	}

	return 0;
}

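/*
 * Record a pair of (wall clock, session clock) reference timestamps taken
 * back to back, so that readers can convert perf timestamps to time of
 * day. A minimal sketch of the conversion a consumer would do, assuming
 * the two reads below happen at effectively the same instant:
 *
 *	u64 sample_tod_ns(struct perf_env *env, u64 t)
 *	{
 *		return env->clock.tod_ns + (t - env->clock.clockid_ns);
 *	}
 */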
static int record__init_clock(struct record *rec)
{
	struct perf_session *session = rec->session;
	struct timespec ref_clockid;
	struct timeval ref_tod;
	u64 ref;

	if (!rec->opts.use_clockid)
		return 0;

	if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
		session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;

	session->header.env.clock.clockid = rec->opts.clockid;

	if (gettimeofday(&ref_tod, NULL) != 0) {
		pr_err("gettimeofday failed, cannot set reference time.\n");
		return -1;
	}

	if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
		pr_err("clock_gettime failed, cannot set reference time.\n");
		return -1;
	}

	ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
	      (u64) ref_tod.tv_usec * NSEC_PER_USEC;

	session->header.env.clock.tod_ns = ref;

	ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
	      (u64) ref_clockid.tv_nsec;

	session->header.env.clock.clockid_ns = ref;
	return 0;
}

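/*
 * Arm an AUX area snapshot: only acts when the trigger is ready, marks it
 * hit and kicks off auxtrace_record__snapshot_start(); a request arriving
 * while a snapshot is already in flight is simply ignored.
 */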
static void hit_auxtrace_snapshot_trigger(struct record *rec)
{
	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
		trigger_hit(&auxtrace_snapshot_trigger);
		auxtrace_record__snapshot_started = 1;
		if (auxtrace_record__snapshot_start(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
	}
}

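/*
 * On hybrid systems an event name such as "cycles" is ambiguous, so
 * qualify it with its PMU, e.g. "cycles" on the performance cores becomes
 * "cpu_core/cycles/". Names that already carry a PMU ("pmu/event/") are
 * left alone.
 */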
static void record__uniquify_name(struct record *rec)
{
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	char *new_name;
	int ret;

	if (!perf_pmu__has_hybrid())
		return;

	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_hybrid(pos))
			continue;

		if (strchr(pos->name, '/'))
			continue;

		ret = asprintf(&new_name, "%s/%s/",
			       pos->pmu_name, pos->name);
		/* asprintf() returns -1 and leaves new_name undefined on error */
		if (ret >= 0) {
			free(pos->name);
			pos->name = new_name;
		}
	}
}

static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data *data = &rec->data;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;
	float ratio = 0;
	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.record_namespaces)
		tool->namespace_events = true;

	if (rec->opts.record_cgroup) {
#ifdef HAVE_FILE_HANDLE
		tool->cgroup_events = true;
#else
		pr_err("cgroup tracking is not supported\n");
		return -1;
#endif
	}

	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(data, tool);
	if (IS_ERR(session)) {
		pr_err("Perf session creation failed.\n");
		return PTR_ERR(session);
	}

	fd = perf_data__fd(data);
	rec->session = session;

	if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
		pr_err("Compression initialization failed.\n");
		return -1;
	}
#ifdef HAVE_EVENTFD_SUPPORT
	done_fd = eventfd(0, EFD_NONBLOCK);
	if (done_fd < 0) {
		pr_err("Failed to create wakeup eventfd, error: %m\n");
		status = -1;
		goto out_delete_session;
	}
	err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
	if (err < 0) {
		pr_err("Failed to add wakeup eventfd to poll list\n");
		status = err;
		goto out_delete_session;
	}
#endif // HAVE_EVENTFD_SUPPORT

	session->header.env.comp_type = PERF_COMP_ZSTD;
	session->header.env.comp_level = rec->opts.comp_level;

	if (rec->opts.kcore &&
	    !record__kcore_readable(&session->machines.host)) {
		pr_err("ERROR: kcore is not readable.\n");
		return -1;
	}

	if (record__init_clock(rec))
		return -1;

	record__init_features(rec);

	if (forks) {
		err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe,
					       workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	/*
	 * If we have just a single event and are sending data
	 * through a pipe, we need to force the ids allocation,
	 * because we synthesize the event name through the pipe
	 * and need the id for that.
	 */
	if (data->is_pipe && rec->evlist->core.nr_entries == 1)
		rec->opts.sample_id = true;

	record__uniquify_name(rec);

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}
	session->header.env.comp_mmap_len = session->evlist->core.mmap_len;

	if (rec->opts.kcore) {
		err = record__kcore_copy(&session->machines.host, data);
		if (err) {
			pr_err("ERROR: Failed to copy kcore\n");
			goto out_child;
		}
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->core.nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (data->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	err = -1;
	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		goto out_child;
	}

	err = record__setup_sb_evlist(rec);
	if (err)
		goto out_child;

	err = record__synthesize(rec, false);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		struct machine *machine = &session->machines.host;
		union perf_event *event;
		pid_t tgid;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before the COMM event,
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize a COMM event to prevent it.
		 */
		tgid = perf_event__synthesize_comm(tool, event,
						   rec->evlist->workload.pid,
						   process_synthesized_event,
						   machine);
		free(event);

		if (tgid == -1)
			goto out_child;

		event = malloc(sizeof(event->namespaces) +
			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			       machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Synthesize a NAMESPACES event for the command specified.
		 */
		perf_event__synthesize_namespaces(tool, event,
						  rec->evlist->workload.pid,
						  tgid, process_synthesized_event,
						  machine);
		free(event);

		evlist__start_workload(rec->evlist);
	}

	if (evlist__initialize_ctlfd(rec->evlist, opts->ctl_fd, opts->ctl_fd_ack))
		goto out_child;

	if (opts->initial_delay) {
		pr_info(EVLIST_DISABLED_MSG);
		if (opts->initial_delay > 0) {
			usleep(opts->initial_delay * USEC_PER_MSEC);
			evlist__enable(rec->evlist);
			pr_info(EVLIST_ENABLED_MSG);
		}
	}

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	perf_hooks__invoke_record_start();
	for (;;) {
		unsigned long long hits = rec->samples;

		/*
		 * rec->evlist->bkw_mmap_state may be BKW_MMAP_EMPTY here:
		 * when done == true and hits != rec->samples in the
		 * previous round.
		 *
		 * evlist__toggle_bkw_mmap ensures we never
		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
		if (trigger_is_hit(&switch_output_trigger) || done || draining)
			evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

		if (record__mmap_read_all(rec, false) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec, false);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in
			 * the overwritable ring buffer should have been
			 * collected, so bkw_mmap_state should be set to
			 * BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 is raised after or during
			 * record__mmap_read_all(), it didn't collect data
			 * from the overwritable ring buffer. Read again.
			 */
			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
				continue;
			trigger_ready(&switch_output_trigger);

			/*
			 * Reenable events in the overwrite ring buffer after
			 * record__mmap_read_all(): we should have collected
			 * data from it.
			 */
			evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}

			/* re-arm the alarm */
			if (rec->switch_output.time)
				alarm(rec->switch_output.time);
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = evlist__poll(rec->evlist, -1);
			/*
			 * Propagate the error only if there is one. Ignore a
			 * positive number of returned events and interrupt
			 * errors.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
			switch (cmd) {
			case EVLIST_CTL_CMD_SNAPSHOT:
				hit_auxtrace_snapshot_trigger(rec);
				evlist__ctlfd_ack(rec->evlist);
				break;
			case EVLIST_CTL_CMD_STOP:
				done = 1;
				break;
			case EVLIST_CTL_CMD_ACK:
			case EVLIST_CTL_CMD_UNSUPPORTED:
			case EVLIST_CTL_CMD_ENABLE:
			case EVLIST_CTL_CMD_DISABLE:
			case EVLIST_CTL_CMD_EVLIST:
			case EVLIST_CTL_CMD_PING:
			default:
				break;
			}
		}

		/*
		 * When perf is starting the traced process, at the end the
		 * events die with the process and we wait for that. Thus
		 * there is no need to disable the events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (opts->auxtrace_snapshot_on_exit)
		record__auxtrace_snapshot_exit(rec);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE], strevsels[2048];
		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));

		evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);

		pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
			strevsels, argv[0], emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

out_child:
	evlist__finalize_ctlfd(rec->evlist);
	record__mmap_read_all(rec, true);
	record__aio_mmap_read_sync(rec);

	if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
		ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
		session->header.env.comp_ratio = ratio + 0.5;
	}

	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	record__synthesize(rec, true);
	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	perf_hooks__invoke_record_end();

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
			perf_data__size(data) / 1024.0 / 1024.0,
			data->path, postfix, samples);
		if (ratio) {
			fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
				rec->session->bytes_transferred / 1024.0 / 1024.0,
				ratio);
		}
		fprintf(stderr, " ]\n");
	}

out_delete_session:
#ifdef HAVE_EVENTFD_SUPPORT
	if (done_fd >= 0)
		close(done_fd);
#endif
	zstd_fini(&session->zstd_data);
	perf_session__delete(session);

	if (!opts->no_bpf_event)
		evlist__stop_sb_thread(rec->sb_evlist);
	return status;
}

static void callchain_debug(struct callchain_param *callchain)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain->record_mode]);

	if (callchain->record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain->dump_size);
}

int record_opts__parse_callchain(struct record_opts *record,
				 struct callchain_param *callchain,
				 const char *arg, bool unset)
{
	int ret;
	callchain->enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain->record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg, callchain);
	if (!ret) {
		/* Enable data address sampling for DWARF unwind. */
		if (callchain->record_mode == CALLCHAIN_DWARF)
			record->sample_address = true;
		callchain_debug(callchain);
	}

	return ret;
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = true;

	if (callchain->record_mode == CALLCHAIN_NONE)
		callchain->record_mode = CALLCHAIN_FP;

	callchain_debug(callchain);
	return 0;
}

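/*
 * Handler for the 'record.*' section of perfconfig. For illustration, a
 * ~/.perfconfig exercising every key handled below might look like:
 *
 *	[record]
 *		build-id = cache        # or: no-cache, skip, mmap
 *		call-graph = dwarf      # forwarded as call-graph.record-mode
 *		aio = 4                 # nr_cblocks, with HAVE_AIO_SUPPORT
 *		debuginfod = http://localhost:8002/
 *
 * The debuginfod URL is just a placeholder; any reachable server works.
 */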
static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.build-id")) {
		if (!strcmp(value, "cache"))
			rec->no_buildid_cache = false;
		else if (!strcmp(value, "no-cache"))
			rec->no_buildid_cache = true;
		else if (!strcmp(value, "skip"))
			rec->no_buildid = true;
		else if (!strcmp(value, "mmap"))
			rec->buildid_mmap = true;
		else
			return -1;
		return 0;
	}
	if (!strcmp(var, "record.call-graph")) {
		var = "call-graph.record-mode";
		return perf_default_config(var, value, cb);
	}
#ifdef HAVE_AIO_SUPPORT
	if (!strcmp(var, "record.aio")) {
		rec->opts.nr_cblocks = strtol(value, NULL, 0);
		if (!rec->opts.nr_cblocks)
			rec->opts.nr_cblocks = nr_cblocks_default;
	}
#endif
	if (!strcmp(var, "record.debuginfod")) {
		rec->debuginfod.urls = strdup(value);
		if (!rec->debuginfod.urls)
			return -ENOMEM;
		rec->debuginfod.set = true;
	}

	return 0;
}

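/*
 * --affinity=node|cpu: while flushing a given mmap buffer, bias the
 * recording thread towards the NUMA node or CPU that buffer belongs to,
 * which cuts remote-memory traffic on big machines. Unrecognized values
 * fall through and keep the default (system-wide) affinity.
 */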
static int record__parse_affinity(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset || !str)
		return 0;

	if (!strcasecmp(str, "node"))
		opts->affinity = PERF_AFFINITY_NODE;
	else if (!strcasecmp(str, "cpu"))
		opts->affinity = PERF_AFFINITY_CPU;

	return 0;
}

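/*
 * Parse the --max-size limit using the B/K/M/G suffix table below, e.g.
 * "--max-size=1G" stores 1 << 30; recording is stopped once the output
 * grows past that limit.
 */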
static int parse_output_max_size(const struct option *opt,
				 const char *str, int unset)
{
	unsigned long *s = (unsigned long *)opt->value;
	static struct parse_tag tags_size[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};
	unsigned long val;

	if (unset) {
		*s = 0;
		return 0;
	}

	val = parse_tag_value(str, tags_size);
	if (val != (unsigned long) -1) {
		*s = val;
		return 0;
	}

	return -1;
}

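/*
 * -m/--mmap-pages takes "pages[,pages]": the first value sizes the data
 * mmaps, the optional second one the AUX area tracing mmaps, e.g.
 * "-m 512,128". Each value goes through __evlist__parse_mmap_pages(),
 * so it must work out to a power-of-two number of pages.
 */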
static int record__parse_mmap_pages(const struct option *opt,
				    const char *str,
				    int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;
	char *s, *p;
	unsigned int mmap_pages;
	int ret;

	if (!str)
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	if (*s) {
		ret = __evlist__parse_mmap_pages(&mmap_pages, s);
		if (ret)
			goto out_free;
		opts->mmap_pages = mmap_pages;
	}

	if (!p) {
		ret = 0;
		goto out_free;
	}

	ret = __evlist__parse_mmap_pages(&mmap_pages, p + 1);
	if (ret)
		goto out_free;

	opts->auxtrace_mmap_pages = mmap_pages;

out_free:
	free(s);
	return ret;
}

void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused)
{
}

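/*
 * --control lets an external process drive a running 'perf record', e.g.
 * when started with '--control fd:10,11' (or a fifo: pair), commands such
 * as 'enable', 'disable', 'snapshot' or 'stop' written to the control fd
 * are picked up by evlist__ctlfd_process() in the main loop above.
 */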
static int parse_control_option(const struct option *opt,
				const char *str,
				int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;

	return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close);
}

static void switch_output_size_warn(struct record *rec)
{
	u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
	struct switch_output *s = &rec->switch_output;

	wakeup_size /= 2;

	if (s->size < wakeup_size) {
		char buf[100];

		unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
		pr_warning("WARNING: switch-output data size is lower than "
			   "the wakeup kernel buffer size (%s), "
			   "expect bigger perf.data sizes\n", buf);
	}
}

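/*
 * --switch-output accepts three forms, matched against the tag tables
 * below, e.g.:
 *
 *	perf record --switch-output=signal ...   # rotate on SIGUSR2
 *	perf record --switch-output=100M ...     # rotate every ~100MB
 *	perf record --switch-output=30s ...      # rotate every 30 seconds
 *
 * --switch-output-event implies the signal mode, see the comment below.
 */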
static int switch_output_setup(struct record *rec)
{
	struct switch_output *s = &rec->switch_output;
	static struct parse_tag tags_size[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};
	static struct parse_tag tags_time[] = {
		{ .tag  = 's', .mult = 1        },
		{ .tag  = 'm', .mult = 60       },
		{ .tag  = 'h', .mult = 60*60    },
		{ .tag  = 'd', .mult = 60*60*24 },
		{ .tag  = 0 },
	};
	unsigned long val;

	/*
	 * If we're using --switch-output-events, then we imply its
	 * --switch-output=signal, as we'll send a SIGUSR2 from the side band
	 * thread to its parent.
	 */
	if (rec->switch_output_event_set)
		goto do_signal;

	if (!s->set)
		return 0;

	if (!strcmp(s->str, "signal")) {
do_signal:
		s->signal = true;
		pr_debug("switch-output with SIGUSR2 signal\n");
		goto enabled;
	}

	val = parse_tag_value(s->str, tags_size);
	if (val != (unsigned long) -1) {
		s->size = val;
		pr_debug("switch-output with %s size threshold\n", s->str);
		goto enabled;
	}

	val = parse_tag_value(s->str, tags_time);
	if (val != (unsigned long) -1) {
		s->time = val;
		pr_debug("switch-output with %s time threshold (%lu seconds)\n",
			 s->str, s->time);
		goto enabled;
	}

	return -1;

enabled:
	rec->timestamp_filename = true;
	s->enabled = true;

	if (s->size && !rec->opts.no_buffering)
		switch_output_size_warn(rec);

	return 0;
}

static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;

static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
				  struct perf_sample *sample, struct machine *machine)
{
	/*
	 * We already have the kernel maps, put in place via
	 * perf_session__create_kernel_maps(), so there is no need to add
	 * them twice.
	 */
	if (!(event->header.misc & PERF_RECORD_MISC_USER))
		return 0;
	return perf_event__process_mmap(tool, event, sample, machine);
}

static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
				   struct perf_sample *sample, struct machine *machine)
{
	/*
	 * We already have the kernel maps, put in place via
	 * perf_session__create_kernel_maps(), so there is no need to add
	 * them twice.
	 */
	if (!(event->header.misc & PERF_RECORD_MISC_USER))
		return 0;

	return perf_event__process_mmap2(tool, event, sample, machine);
}

static int process_timestamp_boundary(struct perf_tool *tool,
				      union perf_event *event __maybe_unused,
				      struct perf_sample *sample,
				      struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);

	set_timestamp_boundary(rec, sample->time);
	return 0;
}

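/*
 * --synth controls which non-sample events get synthesized, as a comma
 * separated list parsed by parse_synth_opt(), e.g. '--synth=no' to skip
 * all of them or '--synth=task,mmap' for task and mmap records only.
 */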
static int parse_record_synth_option(const struct option *opt,
				     const char *str,
				     int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;
	char *p = strdup(str);

	if (p == NULL)
		return -1;

	opts->synth = parse_synth_opt(p);
	free(p);

	if (opts->synth < 0) {
		pr_err("Invalid synth option: %s\n", str);
		return -1;
	}
	return 0;
}

/*
 * XXX Ideally this would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, which is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.mmap_flush          = MMAP_FLUSH_DEFAULT,
		.nr_threads_synthesize = 1,
		.ctl_fd              = -1,
		.ctl_fd_ack          = -1,
		.synth               = PERF_SYNTH_ALL,
	},
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= build_id__process_mmap,
		.mmap2		= build_id__process_mmap2,
		.itrace_start	= process_timestamp_boundary,
		.aux		= process_timestamp_boundary,
		.ordered_events	= true,
	},
};

const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

static bool dry_run;

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * evlist__prepare_workload, etc instead of fork+exec'ing 'perf record',
 * using pipes, etc.
 */
Jiri Olsaefd21302017-01-03 09:19:55 +01002487static struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002488 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02002489 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02002490 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002491 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08002492 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00002493 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2494 NULL, "don't record events from perf itself",
2495 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09002496 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002497 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002498 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002499 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002500 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002501 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03002502 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03002503 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002504 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02002505 "collect raw sample records from all opened counters"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002506 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002507 "system-wide collection from all CPUs"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002508 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002509 "list of cpus to monitor"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002510 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
Jiri Olsa2d4f2792019-02-21 10:41:30 +01002511 OPT_STRING('o', "output", &record.data.path, "file",
Ingo Molnarabaff322009-06-02 22:59:57 +02002512 "output file name"),
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002513 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2514 &record.opts.no_inherit_set,
2515 "child tasks do not inherit counters"),
Wang Nan4ea648a2016-07-14 08:34:47 +00002516 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2517 "synthesize non-sample events at the end of output"),
Wang Nan626a6b72016-07-14 08:34:45 +00002518 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
Wei Lia060c1f2020-08-19 11:19:47 +08002519 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
Arnaldo Carvalho de Melob09c2362018-03-01 14:52:50 -03002520 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2521 "Fail if the specified frequency can't be used"),
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002522 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2523 "profile at this frequency",
2524 record__parse_freq),
Adrian Huntere9db1312015-04-09 18:53:46 +03002525 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2526 "number of mmap data pages and AUX area tracing mmap pages",
2527 record__parse_mmap_pages),
Alexey Budankov470530b2019-03-18 20:40:26 +03002528 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2529 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
2530 record__mmap_flush_parse),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002531 OPT_BOOLEAN(0, "group", &record.opts.group,
Lin Ming43bece72011-08-17 18:42:07 +08002532 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03002533 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002534 NULL, "enables call-graph recording",
2535 &record_callchain_opt),
2536 OPT_CALLBACK(0, "call-graph", &record.opts,
Namhyung Kim76a26542015-10-22 23:28:32 +09002537 "record_mode[,record_size]", record_callchain_help,
Jiri Olsa09b0fd42013-10-26 16:25:33 +02002538 &record_parse_callchain_opt),
Ian Munsiec0555642010-04-13 18:37:33 +10002539 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02002540 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Melob44308f2010-10-26 15:20:09 -02002541 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any messages"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002542 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002543 "per thread counts"),
Peter Zijlstra56100322015-06-10 16:48:50 +02002544 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
Kan Liang3b0a5da2017-08-29 13:11:08 -04002545 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2546 "Record the sample physical addresses"),
Kan Liang542b88f2020-11-30 09:27:53 -08002547 OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
2548 "Record the sampled data address data page size"),
Kan Liangc1de7f32021-01-05 11:57:49 -08002549 OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
2550 "Record the sampled code address (ip) page size"),
Jiri Olsab6f35ed2016-08-01 20:02:35 +02002551 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
Adrian Hunter3abebc52015-07-06 14:51:01 +03002552 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2553 &record.opts.sample_time_set,
2554 "Record the sample timestamps"),
Jiri Olsaf290aa12018-02-01 09:38:11 +01002555 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2556 "Record the sample period"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002557 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
Peter Zijlstra649c48a2009-06-24 21:12:48 +02002558 "don't sample"),
Wang Nand2db9a92016-01-25 09:56:19 +00002559 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2560 &record.no_buildid_cache_set,
2561 "do not update the buildid cache"),
2562 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2563 &record.no_buildid_set,
2564 "do not collect buildids in perf.data"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002565 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
Stephane Eranian023695d2011-02-14 11:20:01 +02002566 "monitor event in cgroup name only",
2567 parse_cgroups),
Alexey Budankov68cd3b42020-07-17 10:07:03 +03002568 OPT_INTEGER('D', "delay", &record.opts.initial_delay,
2569 "ms to wait before starting measurement after program start (-1: start with events disabled)"),
Adrian Huntereeb399b2019-10-04 11:31:21 +03002570 OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
Namhyung Kimbea03402012-04-26 14:15:15 +09002571 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2572 "user to profile"),
Stephane Eraniana5aabda2012-03-08 23:47:45 +01002573
2574 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2575 "branch any", "sample any taken branches",
2576 parse_branch_stack),
2577
2578 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2579 "branch filter mask", "branch stack filter modes",
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01002580 parse_branch_stack),
Andi Kleen05484292013-01-24 16:10:29 +01002581 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2582 "sample by weight (on special events only)"),
Andi Kleen475eeab2013-09-20 07:40:43 -07002583 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2584 "sample transaction flags (special events only)"),
Adrian Hunter3aa59392013-11-15 15:52:29 +02002585 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2586 "use per-thread mmaps"),
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02002587 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2588 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002589 " use '-I?' to list register names", parse_intr_regs),
Andi Kleen84c41742017-09-05 10:00:28 -07002590 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2591 "sample selected machine registers on interrupt,"
Kan Liangaeea9062019-05-14 13:19:32 -07002592 " use '--user-regs=?' to list register names", parse_user_regs),
Andi Kleen85c273d2015-02-24 15:13:40 -08002593 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2594 "Record running/enabled time of read (:S) events"),
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002595 OPT_CALLBACK('k', "clockid", &record.opts,
2596 "clockid", "clockid to use for events, see clock_gettime()",
2597 parse_clockid),
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002598 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2599 "opts", "AUX area tracing Snapshot Mode", ""),
Adrian Hunterc0a6de02019-11-15 14:42:16 +02002600 OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
2601 "opts", "sample AUX area", ""),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08002602 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04002603 "per thread proc mmap processing timeout in ms"),
Hari Bathinif3b36142017-03-08 02:11:43 +05302604 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2605 "Record namespaces events"),
Namhyung Kim8fb4b672020-03-25 21:45:34 +09002606 OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
2607 "Record cgroup events"),
Adrian Hunter16b4b4e2020-05-28 15:08:58 +03002608 OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
2609 &record.opts.record_switch_events_set,
2610 "Record context switch events"),
Jiri Olsa85723882016-02-15 09:34:31 +01002611 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2612 "Configure all used events to run in kernel space.",
2613 PARSE_OPT_EXCLUSIVE),
2614 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2615 "Configure all used events to run in user space.",
2616 PARSE_OPT_EXCLUSIVE),
yuzhoujian53651b22019-05-30 14:29:22 +01002617 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
2618 "collect kernel callchains"),
2619 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
2620 "collect user callchains"),
Wang Nan71dc23262015-10-14 12:41:19 +00002621 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2622 "clang binary to use for compiling BPF scriptlets"),
2623 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2624 "options passed to clang when compiling BPF scriptlets"),
He Kuang7efe0e02015-12-14 10:39:23 +00002625 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2626 "file", "vmlinux pathname"),
Namhyung Kim61566812016-01-11 22:37:09 +09002627 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2628 "Record build-id of all DSOs regardless of hits"),
Jiri Olsae29386c2020-12-14 11:54:57 +01002629 OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
2630 "Record build-id in map events"),
Wang Nanecfd7a92016-04-13 08:21:07 +00002631 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2632 "append timestamp to output filename"),
Jin Yao68588ba2017-12-08 21:13:42 +08002633 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2634 "Record timestamp boundary (time of first/last samples)"),
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002635 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
Andi Kleenc38dab72019-03-14 15:49:56 -07002636 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2637 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
Jiri Olsadc0c6122017-01-09 10:51:58 +01002638 "signal"),
Arnaldo Carvalho de Melo899e5ff2020-04-27 17:56:37 -03002639 OPT_CALLBACK_SET(0, "switch-output-event", &record.sb_evlist, &record.switch_output_event_set, "switch output event",
2640 "switch output event selector. use 'perf list' to list available events",
2641 parse_events_option_new_evlist),
Andi Kleen03724b22019-03-14 15:49:55 -07002642 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2643 "Limit number of switch output generated files"),
Wang Nan0aab2132016-06-16 08:02:41 +00002644 OPT_BOOLEAN(0, "dry-run", &dry_run,
2645 "Parse options then exit"),
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002646#ifdef HAVE_AIO_SUPPORT
Alexey Budankov93f20c02018-11-06 12:07:19 +03002647 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2648 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002649 record__aio_parse),
2650#endif
Alexey Budankovf4fe11b2019-01-22 20:52:03 +03002651 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2652 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2653 record__parse_affinity),
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002654#ifdef HAVE_ZSTD_SUPPORT
2655 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
2656 "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
2657 record__parse_comp_level),
2658#endif
Jiwei Sun6d575812019-10-22 16:09:01 +08002659 OPT_CALLBACK(0, "max-size", &record.output_max_size,
2660 "size", "Limit the maximum size of the output file", parse_output_max_size),
Stephane Eraniand99c22e2020-04-22 08:50:38 -07002661 OPT_UINTEGER(0, "num-thread-synthesize",
2662 &record.opts.nr_threads_synthesize,
2663 "number of threads to run for event synthesis"),
Stephane Eranian70943492020-05-05 11:29:43 -07002664#ifdef HAVE_LIBPFM
2665 OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
2666 "libpfm4 event selector. use 'perf list' to list available events",
2667 parse_libpfm_events_option),
2668#endif
Adrian Huntera8fcbd22020-09-02 13:57:07 +03002669 OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
Adrian Hunterd20aff12020-09-01 12:37:57 +03002670 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events,\n"
2671 "\t\t\t 'snapshot': AUX area tracing snapshot).\n"
Adrian Huntera8fcbd22020-09-02 13:57:07 +03002672 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
2673 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
Alexey Budankov1d078cc2020-07-17 10:08:23 +03002674 parse_control_option),
Namhyung Kim41b740b2021-08-10 21:46:58 -07002675 OPT_CALLBACK(0, "synth", &record.opts, "no|all|task|mmap|cgroup",
2676 "Fine-tune event synthesis: default=all", parse_record_synth_option),
Jiri Olsa9bce13e2021-12-09 21:04:25 +01002677 OPT_STRING_OPTARG_SET(0, "debuginfod", &record.debuginfod.urls,
2678 &record.debuginfod.set, "debuginfod urls",
2679 "Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls",
2680 "system"),
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002681 OPT_END()
2682};
2683
Namhyung Kime5b2c202014-10-23 00:15:46 +09002684struct option *record_options = __record_options;
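For readers unfamiliar with libsubcmd, here is a minimal, self-contained sketch of the option-table pattern used by __record_options above; all names below (example_options, example_usage, the variables) are illustrative, not perf's:

	#include <stdio.h>
	#include <subcmd/parse-options.h>

	static int verbose;
	static const char *output = "out.data";

	static const char * const example_usage[] = {
		"example [<options>]",
		NULL
	};

	static struct option example_options[] = {
		OPT_INCR('v', "verbose", &verbose, "be more verbose"),
		OPT_STRING('o', "output", &output, "file", "output file name"),
		OPT_END()
	};

	int main(int argc, const char **argv)
	{
		/* parse_options() returns the number of remaining (non-option) arguments. */
		argc = parse_options(argc, argv, example_options, example_usage, 0);
		printf("verbose=%d output=%s\n", verbose, output);
		return 0;
	}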
2685
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03002686int cmd_record(int argc, const char **argv)
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002687{
Adrian Hunteref149c22015-04-09 18:53:45 +03002688 int err;
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03002689 struct record *rec = &record;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002690 char errbuf[BUFSIZ];
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002691
Arnaldo Carvalho de Melo67230472018-03-01 13:46:23 -03002692 setlocale(LC_ALL, "");
2693
Wang Nan48e1cab2015-12-14 10:39:22 +00002694#ifndef HAVE_LIBBPF_SUPPORT
2695# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2696 set_nobuild('\0', "clang-path", true);
2697 set_nobuild('\0', "clang-opt", true);
2698# undef set_nobuild
2699#endif
2700
He Kuang7efe0e02015-12-14 10:39:23 +00002701#ifndef HAVE_BPF_PROLOGUE
2702# if !defined (HAVE_DWARF_SUPPORT)
2703# define REASON "NO_DWARF=1"
2704# elif !defined (HAVE_LIBBPF_SUPPORT)
2705# define REASON "NO_LIBBPF=1"
2706# else
2707# define REASON "this architecture doesn't support BPF prologue"
2708# endif
2709# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2710 set_nobuild('\0', "vmlinux", true);
2711# undef set_nobuild
2712# undef REASON
2713#endif
2714
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002715 rec->opts.affinity = PERF_AFFINITY_SYS;
2716
Jiri Olsa0f98b112019-07-21 13:23:55 +02002717 rec->evlist = evlist__new();
Arnaldo Carvalho de Melo3e2be2d2014-01-03 15:03:26 -03002718 if (rec->evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002719 return -ENOMEM;
2720
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03002721 err = perf_config(perf_record_config, rec);
2722 if (err)
2723 return err;
Jiri Olsaeb853e82014-02-03 12:44:42 +01002724
Tom Zanussibca647a2010-11-10 08:11:30 -06002725 argc = parse_options(argc, argv, record_options, record_usage,
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002726 PARSE_OPT_STOP_AT_NON_OPTION);
Namhyung Kim68ba3232017-02-17 17:17:42 +09002727 if (quiet)
2728 perf_quiet_option();
Jiri Olsa483635a2017-02-17 18:00:18 +01002729
James Clark7cc72552021-10-18 14:48:42 +01002730 err = symbol__validate_sym_arguments();
2731 if (err)
2732 return err;
2733
Jiri Olsa9bce13e2021-12-09 21:04:25 +01002734 perf_debuginfod_setup(&record.debuginfod);
2735
Jiri Olsa483635a2017-02-17 18:00:18 +01002736 /* Make system wide (-a) the default target. */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002737 if (!argc && target__none(&rec->opts.target))
Jiri Olsa483635a2017-02-17 18:00:18 +01002738 rec->opts.target.system_wide = true;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002739
Namhyung Kimbea03402012-04-26 14:15:15 +09002740 if (nr_cgroups && !rec->opts.target.system_wide) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002741 usage_with_options_msg(record_usage, record_options,
2742 "cgroup monitoring only available in system-wide mode");
Stephane Eranian023695d2011-02-14 11:20:01 +02002744 }
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002745
Jiri Olsae29386c2020-12-14 11:54:57 +01002746 if (rec->buildid_mmap) {
2747 if (!perf_can_record_build_id()) {
2748 pr_err("Failed: no support to record build id in mmap events, update your kernel.\n");
2749 err = -EINVAL;
2750 goto out_opts;
2751 }
2752 pr_debug("Enabling build id in mmap2 events.\n");
2753 /* Enable mmap build id synthesizing. */
2754 symbol_conf.buildid_mmap2 = true;
2755 /* Enable perf_event_attr::build_id bit. */
2756 rec->opts.build_id = true;
2757 /* Disable build id cache. */
2758 rec->no_buildid = true;
2759 }
2760
Namhyung Kim4f2abe92021-05-27 11:28:35 -07002761 if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
2762 pr_err("Kernel has no cgroup sampling support.\n");
2763 err = -EINVAL;
2764 goto out_opts;
2765 }
2766
Adrian Huntereeb399b2019-10-04 11:31:21 +03002767 if (rec->opts.kcore)
2768 rec->data.is_dir = true;
2769
Alexey Budankov504c1ad2019-03-18 20:44:42 +03002770 if (rec->opts.comp_level != 0) {
2771 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
2772 rec->no_buildid = true;
2773 }
2774
Adrian Hunterb757bb02015-07-21 12:44:04 +03002775 if (rec->opts.record_switch_events &&
2776 !perf_can_record_switch_events()) {
Namhyung Kimc7118362015-10-25 00:49:27 +09002777 ui__error("kernel does not support recording context switch events\n");
2778 parse_options_usage(record_usage, record_options, "switch-events", 0);
Adrian Huntera8fcbd22020-09-02 13:57:07 +03002779 err = -EINVAL;
2780 goto out_opts;
Adrian Hunterb757bb02015-07-21 12:44:04 +03002781 }
Stephane Eranian023695d2011-02-14 11:20:01 +02002782
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002783 if (switch_output_setup(rec)) {
2784 parse_options_usage(record_usage, record_options, "switch-output", 0);
Adrian Huntera8fcbd22020-09-02 13:57:07 +03002785 err = -EINVAL;
2786 goto out_opts;
Jiri Olsacb4e1eb2017-01-09 10:51:57 +01002787 }
2788
Jiri Olsabfacbe32017-01-09 10:52:00 +01002789 if (rec->switch_output.time) {
2790 signal(SIGALRM, alarm_sig_handler);
2791 alarm(rec->switch_output.time);
2792 }
2793
Andi Kleen03724b22019-03-14 15:49:55 -07002794 if (rec->switch_output.num_files) {
2795 rec->switch_output.filenames = calloc(rec->switch_output.num_files,
2796 sizeof(char *));
Adrian Huntera8fcbd22020-09-02 13:57:07 +03002797 if (!rec->switch_output.filenames) {
2798 err = -ENOMEM;
2799 goto out_opts;
2800 }
Andi Kleen03724b22019-03-14 15:49:55 -07002801 }
2802
Adrian Hunter1b36c032016-09-23 17:38:39 +03002803 /*
2804 * Allow aliases to facilitate the lookup of symbols for address
2805 * filters. Refer to auxtrace_parse_filters().
2806 */
2807 symbol_conf.allow_aliases = true;
2808
2809 symbol__init(NULL);
2810
Alexey Budankov8384a262019-12-03 14:45:27 +03002811 if (rec->opts.affinity != PERF_AFFINITY_SYS) {
Ian Rogers6d188042022-01-04 22:13:51 -08002812 rec->affinity_mask.nbits = cpu__max_cpu().cpu;
Andy Shevchenko7fc5b572021-09-07 19:59:35 -07002813 rec->affinity_mask.bits = bitmap_zalloc(rec->affinity_mask.nbits);
Alexey Budankov8384a262019-12-03 14:45:27 +03002814 if (!rec->affinity_mask.bits) {
2815 pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
Adrian Huntera8fcbd22020-09-02 13:57:07 +03002816 err = -ENOMEM;
2817 goto out_opts;
Alexey Budankov8384a262019-12-03 14:45:27 +03002818 }
2819 pr_debug2("thread mask[%zd]: empty\n", rec->affinity_mask.nbits);
2820 }
2821
Adrian Hunter4b5ea3b2018-03-06 11:13:12 +02002822 err = record__auxtrace_init(rec);
Adrian Hunter1b36c032016-09-23 17:38:39 +03002823 if (err)
2824 goto out;
2825
Wang Nan0aab2132016-06-16 08:02:41 +00002826 if (dry_run)
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002827 goto out;
Wang Nan0aab2132016-06-16 08:02:41 +00002828
Wang Nand7888572016-04-08 15:07:24 +00002829 err = bpf__setup_stdout(rec->evlist);
2830 if (err) {
2831 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2832 pr_err("ERROR: Setup BPF stdout failed: %s\n",
2833 errbuf);
Adrian Hunter5c01ad602016-09-23 17:38:37 +03002834 goto out;
Wang Nand7888572016-04-08 15:07:24 +00002835 }
2836
Adrian Hunteref149c22015-04-09 18:53:45 +03002837 err = -ENOMEM;
2838
Wang Nan0c1d46a2016-04-20 18:59:52 +00002839 if (rec->no_buildid_cache || rec->no_buildid) {
Stephane Eraniana1ac1d32010-06-17 11:39:01 +02002840 disable_buildid_cache();
Jiri Olsadc0c6122017-01-09 10:51:58 +01002841 } else if (rec->switch_output.enabled) {
Wang Nan0c1d46a2016-04-20 18:59:52 +00002842 /*
2843 * In 'perf record --switch-output', disable buildid
2844 * generation by default to reduce data file switching
2845 * overhead. Still generate buildids if they are required
2846 * explicitly using
2847 *
Jiri Olsa60437ac2017-01-03 09:19:56 +01002848 * perf record --switch-output --no-no-buildid \
Wang Nan0c1d46a2016-04-20 18:59:52 +00002849 * --no-no-buildid-cache
2850 *
2851 * Following code equals to:
2852 *
2853 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2854 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2855 * disable_buildid_cache();
2856 */
2857 bool disable = true;
2858
2859 if (rec->no_buildid_set && !rec->no_buildid)
2860 disable = false;
2861 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2862 disable = false;
2863 if (disable) {
2864 rec->no_buildid = true;
2865 rec->no_buildid_cache = true;
2866 disable_buildid_cache();
2867 }
2868 }
Arnaldo Carvalho de Melo655000e2009-12-15 20:04:40 -02002869
Wang Nan4ea648a2016-07-14 08:34:47 +00002870 if (record.opts.overwrite)
2871 record.opts.tail_synthesize = true;
2872
Jin Yaob53a0752021-04-27 15:01:26 +08002873 if (rec->evlist->core.nr_entries == 0) {
2874 if (perf_pmu__has_hybrid()) {
2875 err = evlist__add_default_hybrid(rec->evlist,
2876 !record.opts.no_samples);
2877 } else {
2878 err = __evlist__add_default(rec->evlist,
2879 !record.opts.no_samples);
2880 }
2881
2882 if (err < 0) {
2883 pr_err("Not enough memory for event selector list\n");
2884 goto out;
2885 }
Peter Zijlstrabbd36e52009-06-11 23:11:50 +02002886 }
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002887
Adrian Hunter69e7e5b2013-11-18 11:55:57 +02002888 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2889 rec->opts.no_inherit = true;
2890
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002891 err = target__validate(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002892 if (err) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002893 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Jiri Olsac3dec272018-02-06 19:17:58 +01002894 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002895 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09002896
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002897 err = target__parse_uid(&rec->opts.target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002898 if (err) {
2899 int saved_errno = errno;
2900
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002901 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
Namhyung Kim3780f482012-05-29 13:22:57 +09002902 ui__error("%s", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002903
2904 err = -saved_errno;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002905 goto out;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002906 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02002907
Mengting Zhangca800062017-12-13 15:01:53 +08002908 /* Enable ignoring missing threads when -u/-p option is defined. */
2909 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
Jiri Olsa23dc4f12016-12-12 11:35:43 +01002910
Jin Yao1d3351e2021-07-23 14:34:33 +08002911 if (evlist__fix_hybrid_cpus(rec->evlist, rec->opts.target.cpu_list)) {
2912 pr_err("failed to use cpu list %s\n",
2913 rec->opts.target.cpu_list);
err = -EINVAL;
2914 goto out;
2915 }
2916
2917 rec->opts.target.hybrid = perf_pmu__has_hybrid();
Alexandre Truong7248e302021-12-17 15:45:15 +00002918
2919 if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP)
2920 arch__add_leaf_frame_record_opts(&rec->opts);
2921
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09002922 err = -ENOMEM;
Arnaldo Carvalho de Melo7748bb72020-11-30 14:56:52 -03002923 if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
Arnaldo Carvalho de Melodd7927f2011-01-12 14:28:51 -02002924 usage_with_options(record_usage, record_options);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02002925
Adrian Hunteref149c22015-04-09 18:53:45 +03002926 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2927 if (err)
Adrian Hunter394c01e2016-09-23 17:38:36 +03002928 goto out;
Adrian Hunteref149c22015-04-09 18:53:45 +03002929
Namhyung Kim61566812016-01-11 22:37:09 +09002930 /*
2931 * We take all buildids when the file contains
2932 * AUX area tracing data because we do not decode the
2933 * trace, which would take too long.
2934 */
2935 if (rec->opts.full_auxtrace)
2936 rec->buildid_all = true;
2937
Adrian Hunter246eba82020-05-12 15:19:18 +03002938 if (rec->opts.text_poke) {
2939 err = record__config_text_poke(rec->evlist);
2940 if (err) {
2941 pr_err("record__config_text_poke failed, error %d\n", err);
2942 goto out;
2943 }
2944 }
2945
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03002946 if (record_opts__config(&rec->opts)) {
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002947 err = -EINVAL;
Adrian Hunter394c01e2016-09-23 17:38:36 +03002948 goto out;
Mike Galbraith7e4ff9e2009-10-12 07:56:03 +02002949 }
2950
Alexey Budankov93f20c02018-11-06 12:07:19 +03002951 if (rec->opts.nr_cblocks > nr_cblocks_max)
2952 rec->opts.nr_cblocks = nr_cblocks_max;
Alexey Budankov5d7f4112019-03-18 20:43:35 +03002953 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
Alexey Budankovd3d1af62018-11-06 12:04:58 +03002954
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002955 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
Alexey Budankov470530b2019-03-18 20:40:26 +03002956 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
Alexey Budankov9d2ed642019-01-22 20:47:43 +03002957
Alexey Budankov51255a82019-03-18 20:42:19 +03002958 if (rec->opts.comp_level > comp_level_max)
2959 rec->opts.comp_level = comp_level_max;
2960 pr_debug("comp level: %d\n", rec->opts.comp_level);
2961
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02002962 err = __cmd_record(&record, argc, argv);
Adrian Hunter394c01e2016-09-23 17:38:36 +03002963out:
Alexey Budankov8384a262019-12-03 14:45:27 +03002964 bitmap_free(rec->affinity_mask.bits);
Jiri Olsac12995a2019-07-21 13:23:56 +02002965 evlist__delete(rec->evlist);
Arnaldo Carvalho de Melod65a4582010-07-30 18:31:28 -03002966 symbol__exit();
Adrian Hunteref149c22015-04-09 18:53:45 +03002967 auxtrace_record__free(rec->itr);
Adrian Huntera8fcbd22020-09-02 13:57:07 +03002968out_opts:
Adrian Hunteree7fe312020-09-03 15:29:37 +03002969 evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
Arnaldo Carvalho de Melo39d17da2010-07-29 14:08:55 -03002970 return err;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02002971}
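As a usage note for the --control option handled above: with a session started as, say, 'perf record -D -1 --control fifo:/tmp/ctl,/tmp/ack ...', a client can drive the measurement through the fifos. A small sketch follows; the fifo paths are made up for illustration:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char ack[8] = { 0 };
		int ctl = open("/tmp/ctl", O_WRONLY);	/* command fifo */
		int ackfd = open("/tmp/ack", O_RDONLY);	/* completion fifo */

		if (ctl < 0 || ackfd < 0)
			return 1;

		/* 'enable' starts the events; perf answers with "ack\n". */
		write(ctl, "enable\n", strlen("enable\n"));
		read(ackfd, ack, sizeof(ack) - 1);
		printf("perf replied: %s", ack);

		close(ctl);
		close(ackfd);
		return 0;
	}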
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002972
2973static void snapshot_sig_handler(int sig __maybe_unused)
2974{
Jiri Olsadc0c6122017-01-09 10:51:58 +01002975 struct record *rec = &record;
2976
Adrian Hunterd20aff12020-09-01 12:37:57 +03002977 hit_auxtrace_snapshot_trigger(rec);
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002978
Jiri Olsadc0c6122017-01-09 10:51:58 +01002979 if (switch_output_signal(rec))
Wang Nan3c1cb7e2016-04-20 18:59:50 +00002980 trigger_hit(&switch_output_trigger);
Adrian Hunter2dd6d8a2015-04-30 17:37:32 +03002981}
Jiri Olsabfacbe32017-01-09 10:52:00 +01002982
2983static void alarm_sig_handler(int sig __maybe_unused)
2984{
2985 struct record *rec = &record;
2986
2987 if (switch_output_time(rec))
2988 trigger_hit(&switch_output_trigger);
2989}
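Both handlers above only flip trigger state and leave the heavy lifting to the main thread, which is the safe pattern for signal handlers. A stripped-down, standalone illustration of that pattern, with the interval hard-coded to 5 seconds:

	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	static volatile sig_atomic_t rotate;

	static void on_alarm(int sig)
	{
		(void)sig;
		rotate = 1;	/* only set a flag in the handler... */
		alarm(5);	/* ...and re-arm the timer */
	}

	int main(void)
	{
		signal(SIGALRM, on_alarm);
		alarm(5);

		for (;;) {
			pause();
			if (rotate) {	/* ...the main loop does the real work */
				rotate = 0;
				puts("would switch output file here");
			}
		}
		return 0;
	}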