// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"

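/*
 * Compressed (PERF_RECORD_COMPRESSED) payloads are decompressed into
 * anonymous mmap()ed buffers chained on session->decomp/decomp_last.  Any
 * bytes left over in the previous buffer (typically a record split across
 * compressed chunks) are copied to the front of the new buffer before
 * decompressing, so consumers always see whole records.
 */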
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct compressed_event);
	src_size = event->pack.header.size - sizeof(struct compressed_event);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

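/*
 * Allocate and set up a session.  "data" may be NULL for tools that only
 * need the host machine state; when it is a file opened for reading, the
 * header is parsed and the evlist/machines are initialized from it.
 *
 * A rough usage sketch for the read side (error handling trimmed, and the
 * setup of "data" and "tool" left to the caller):
 *
 *	struct perf_session *session = perf_session__new(data, false, &tool);
 *
 *	if (session == NULL)
 *		return -1;
 *	...
 *	perf_session__delete(session);
 */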
struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool   = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		if (perf_data__open(data))
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			if (perf_session__open(session) < 0)
				goto out_delete;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir && perf_data__open_dir(data))
				goto out_delete;
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;
	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

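/*
 * Read and discard n bytes from fd, for input that arrives on a pipe and
 * therefore cannot simply be lseek()ed past.
 */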
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}


static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
							union perf_event *event __maybe_unused,
							u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

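/*
 * Fill in every callback the tool did not set with a default: either a real
 * handler (e.g. perf_event__process_lost) or one of the "unhandled" stubs
 * above, so the dispatch code never has to check for NULL callbacks.
 */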
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf_event == NULL)
		tool->bpf_event = perf_event__process_bpf_event;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}

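/*
 * Byte-swapping helpers, used when reading a perf.data file recorded on a
 * machine of the opposite endianness.  Each PERF_RECORD_* swapper below
 * converts its fixed fields and then, when sample_id_all is set, the
 * sample_id trailer that follows the record payload.
 */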
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid   = bswap_32(event->mmap.pid);
	event->mmap.tid   = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len   = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size   = bswap_64(event->aux.aux_size);
	event->aux.flags      = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time	  = bswap_64(event->throttle.time);
	event->throttle.id	  = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid		= bswap_32(event->namespaces.pid);
	event->namespaces.tid		= bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces	= bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

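/* Reverse the bit order within one byte, e.g. revbyte(0x12) == 0x48. */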
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

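/*
 * The bswap_safe() checks below guard against perf.data files written with
 * an older, shorter struct perf_event_attr: a field is only swapped when it
 * actually lies within the attr->size bytes present on disk.
 */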
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n)					\
	(attr->size > (offsetof(struct perf_event_attr, f) +	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)			\
do {						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size      = bswap_64(event->auxtrace.size);
	event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct cpu_map_mask *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct cpu_map_mask *) data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id     = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu    = bswap_32(event->stat.cpu);
	event->stat.val    = bswap_64(event->stat.val);
	event->stat.ena    = bswap_64(event->stat.ena);
	event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

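/*
 * Dispatch table for byte-swapping records by type.  Entries left NULL
 * (e.g. PERF_RECORD_HEADER_BUILD_ID) are records that are not swapped here.
 */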
typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES]	  = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * LBR callstack can only get the user call chain,
		 * i is the kernel call chain number,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBRs are paired registers. The caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, for the call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will record
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}

static void callchain__printf(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

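/*
 * One line per branch entry.  Flag letters: M mispredicted, P predicted,
 * A transaction abort, T in transaction.
 */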
static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			i, e->from, e->to,
			(unsigned short)e->flags.cycles,
			e->flags.mispred ? "M" : " ",
			e->flags.predicted ? "P" : " ",
			e->flags.abort ? "A" : " ",
			e->flags.in_tx ? "T" : " ",
			(unsigned)e->flags.reserved);
	}
}

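/*
 * The sampled register values are packed in the order of the set bits in
 * the mask, so walk the mask and consume one value per bit.
 */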
static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

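/*
 * The helpers below produce the human readable raw event dump, i.e. the
 * dump_trace path (used by e.g. 'perf report -D').
 */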
static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
Thomas Gleixner9aefcab02010-12-07 12:48:47 +00001244}
1245
Jiri Olsadac7f6b2017-08-24 18:27:32 +02001246static void dump_read(struct perf_evsel *evsel, union perf_event *event)
1247{
1248 struct read_event *read_event = &event->read;
1249 u64 read_format;
1250
1251 if (!dump_trace)
1252 return;
1253
1254 printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
Arnaldo Carvalho de Melofc50e0b2019-07-03 16:12:51 -03001255 perf_evsel__name(evsel),
Jiri Olsadac7f6b2017-08-24 18:27:32 +02001256 event->read.value);
1257
Leo Yanf3c8d902019-07-02 18:34:17 +08001258 if (!evsel)
1259 return;
1260
Jiri Olsadac7f6b2017-08-24 18:27:32 +02001261 read_format = evsel->attr.read_format;
1262
1263 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1264 printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);
1265
1266 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1267 printf("... time running : %" PRIu64 "\n", read_event->time_running);
1268
1269 if (read_format & PERF_FORMAT_ID)
1270 printf("... id : %" PRIu64 "\n", read_event->id);
1271}
1272
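/*
 * Pick the machine an event belongs to: guest samples (cpumode
 * GUEST_KERNEL/GUEST_USER with perf_guest set) are routed to the guest
 * machine keyed by pid, taken from the event itself for MMAP/MMAP2 and
 * from the sample otherwise, falling back to the default guest machine;
 * everything else goes to the host machine.
 */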
Arnaldo Carvalho de Melo54245fd2015-02-14 14:26:15 -03001273static struct machine *machines__find_for_cpumode(struct machines *machines,
Adrian Hunteref893252013-08-27 11:23:06 +03001274 union perf_event *event,
1275 struct perf_sample *sample)
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02001276{
Dongsheng Yangad85ace2013-12-20 13:41:47 -05001277 struct machine *machine;
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02001278
David Ahern7c0f4a42012-07-20 17:25:48 -06001279 if (perf_guest &&
Arnaldo Carvalho de Melo473398a2016-03-22 18:23:43 -03001280 ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1281 (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
Nikunj A. Dadhania7fb0a5e2012-04-09 13:52:23 +05301282 u32 pid;
1283
Stephane Eranian5c5e8542013-08-21 12:10:25 +02001284 if (event->header.type == PERF_RECORD_MMAP
1285 || event->header.type == PERF_RECORD_MMAP2)
Nikunj A. Dadhania7fb0a5e2012-04-09 13:52:23 +05301286 pid = event->mmap.pid;
1287 else
Adrian Hunteref893252013-08-27 11:23:06 +03001288 pid = sample->pid;
Nikunj A. Dadhania7fb0a5e2012-04-09 13:52:23 +05301289
Arnaldo Carvalho de Melo54245fd2015-02-14 14:26:15 -03001290 machine = machines__find(machines, pid);
Dongsheng Yangad85ace2013-12-20 13:41:47 -05001291 if (!machine)
Ravi Bangoria3caeaa52015-12-07 12:25:02 +05301292 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
Dongsheng Yangad85ace2013-12-20 13:41:47 -05001293 return machine;
Nikunj A. Dadhania7fb0a5e2012-04-09 13:52:23 +05301294 }
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02001295
Arnaldo Carvalho de Melo54245fd2015-02-14 14:26:15 -03001296 return &machines->host;
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02001297}
1298
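/*
 * PERF_SAMPLE_READ values are running counts, so the period handed to the
 * tool is the delta against the last value seen for this sample id; values
 * with an unknown id bump nr_unknown_id, and zero-period samples are not
 * delivered at all.
 */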
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001299static int deliver_sample_value(struct perf_evlist *evlist,
Jiri Olsae4caec02012-10-10 18:52:24 +02001300 struct perf_tool *tool,
1301 union perf_event *event,
1302 struct perf_sample *sample,
1303 struct sample_read_value *v,
1304 struct machine *machine)
1305{
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001306 struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
Jiri Olsae4caec02012-10-10 18:52:24 +02001307
Jiri Olsae4caec02012-10-10 18:52:24 +02001308 if (sid) {
1309 sample->id = v->id;
1310 sample->period = v->value - sid->period;
1311 sid->period = v->value;
1312 }
1313
1314 if (!sid || sid->evsel == NULL) {
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001315 ++evlist->stats.nr_unknown_id;
Jiri Olsae4caec02012-10-10 18:52:24 +02001316 return 0;
1317 }
1318
Jiri Olsa529c1a92019-02-20 13:27:55 +01001319 /*
 1320 * There's no reason to deliver a sample
 1321 * with a zero period, bail out.
1322 */
1323 if (!sample->period)
1324 return 0;
1325
Jiri Olsae4caec02012-10-10 18:52:24 +02001326 return tool->sample(tool, event, sample, sid->evsel, machine);
1327}
1328
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001329static int deliver_sample_group(struct perf_evlist *evlist,
Jiri Olsae4caec02012-10-10 18:52:24 +02001330 struct perf_tool *tool,
1331 union perf_event *event,
1332 struct perf_sample *sample,
1333 struct machine *machine)
1334{
1335 int ret = -EINVAL;
1336 u64 i;
1337
1338 for (i = 0; i < sample->read.group.nr; i++) {
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001339 ret = deliver_sample_value(evlist, tool, event, sample,
Jiri Olsae4caec02012-10-10 18:52:24 +02001340 &sample->read.group.values[i],
1341 machine);
1342 if (ret)
1343 break;
1344 }
1345
1346 return ret;
1347}
1348
1349static int
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001350 perf_evlist__deliver_sample(struct perf_evlist *evlist,
Jiri Olsae4caec02012-10-10 18:52:24 +02001351 struct perf_tool *tool,
1352 union perf_event *event,
1353 struct perf_sample *sample,
1354 struct perf_evsel *evsel,
1355 struct machine *machine)
1356{
1357 /* We know evsel != NULL. */
1358 u64 sample_type = evsel->attr.sample_type;
1359 u64 read_format = evsel->attr.read_format;
1360
Soramichi AKIYAMAd94386f2017-01-17 22:22:33 +09001361 /* Standard sample delivery. */
Jiri Olsae4caec02012-10-10 18:52:24 +02001362 if (!(sample_type & PERF_SAMPLE_READ))
1363 return tool->sample(tool, event, sample, evsel, machine);
1364
1365 /* For PERF_SAMPLE_READ we have either single or group mode. */
1366 if (read_format & PERF_FORMAT_GROUP)
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001367 return deliver_sample_group(evlist, tool, event, sample,
Jiri Olsae4caec02012-10-10 18:52:24 +02001368 machine);
1369 else
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001370 return deliver_sample_value(evlist, tool, event, sample,
Jiri Olsae4caec02012-10-10 18:52:24 +02001371 &sample->read.one, machine);
1372}
1373
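/*
 * Deliver one already-parsed event to the matching tool callback: resolve
 * the evsel from the sample id and the target machine from the cpumode,
 * dump the event when tracing, account lost/aux statistics and dispatch on
 * the header type.
 */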
Arnaldo Carvalho de Melod10eb1e2015-03-03 12:20:38 -03001374static int machines__deliver_event(struct machines *machines,
1375 struct perf_evlist *evlist,
1376 union perf_event *event,
1377 struct perf_sample *sample,
1378 struct perf_tool *tool, u64 file_offset)
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001379{
Arnaldo Carvalho de Melo9e69c212011-03-15 15:44:01 -03001380 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02001381 struct machine *machine;
Arnaldo Carvalho de Melo9e69c212011-03-15 15:44:01 -03001382
Arnaldo Carvalho de Melo9fa87272015-02-14 15:08:51 -03001383 dump_event(evlist, event, file_offset, sample);
Thomas Gleixner532e7262010-12-07 12:48:55 +00001384
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001385 evsel = perf_evlist__id2evsel(evlist, sample->id);
Arnaldo Carvalho de Melo7b275092011-10-29 12:15:04 -02001386
Arnaldo Carvalho de Melofa713a4e2015-03-03 11:48:12 -03001387 machine = machines__find_for_cpumode(machines, event, sample);
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02001388
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001389 switch (event->header.type) {
1390 case PERF_RECORD_SAMPLE:
Arnaldo Carvalho de Melo9e69c212011-03-15 15:44:01 -03001391 if (evsel == NULL) {
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001392 ++evlist->stats.nr_unknown_id;
Jiri Olsa67822062012-04-12 14:21:01 +02001393 return 0;
Arnaldo Carvalho de Melo9e69c212011-03-15 15:44:01 -03001394 }
Kan Liang1b29ac52015-09-03 08:31:00 -04001395 dump_sample(evsel, event, sample);
Joerg Roedel0c095712012-02-10 18:05:04 +01001396 if (machine == NULL) {
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001397 ++evlist->stats.nr_unprocessable_samples;
Jiri Olsa67822062012-04-12 14:21:01 +02001398 return 0;
Joerg Roedel0c095712012-02-10 18:05:04 +01001399 }
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001400 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001401 case PERF_RECORD_MMAP:
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001402 return tool->mmap(tool, event, sample, machine);
Stephane Eranian5c5e8542013-08-21 12:10:25 +02001403 case PERF_RECORD_MMAP2:
Kan Liang930e6fc2015-06-17 09:51:10 -04001404 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1405 ++evlist->stats.nr_proc_map_timeout;
Stephane Eranian5c5e8542013-08-21 12:10:25 +02001406 return tool->mmap2(tool, event, sample, machine);
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001407 case PERF_RECORD_COMM:
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001408 return tool->comm(tool, event, sample, machine);
Hari Bathinif3b36142017-03-08 02:11:43 +05301409 case PERF_RECORD_NAMESPACES:
1410 return tool->namespaces(tool, event, sample, machine);
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001411 case PERF_RECORD_FORK:
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001412 return tool->fork(tool, event, sample, machine);
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001413 case PERF_RECORD_EXIT:
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001414 return tool->exit(tool, event, sample, machine);
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001415 case PERF_RECORD_LOST:
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001416 if (tool->lost == perf_event__process_lost)
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001417 evlist->stats.total_lost += event->lost.lost;
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001418 return tool->lost(tool, event, sample, machine);
Kan Liangc4937a92015-05-10 15:13:15 -04001419 case PERF_RECORD_LOST_SAMPLES:
1420 if (tool->lost_samples == perf_event__process_lost_samples)
1421 evlist->stats.total_lost_samples += event->lost_samples.lost;
1422 return tool->lost_samples(tool, event, sample, machine);
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001423 case PERF_RECORD_READ:
Jiri Olsadac7f6b2017-08-24 18:27:32 +02001424 dump_read(evsel, event);
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001425 return tool->read(tool, event, sample, evsel, machine);
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001426 case PERF_RECORD_THROTTLE:
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001427 return tool->throttle(tool, event, sample, machine);
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001428 case PERF_RECORD_UNTHROTTLE:
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001429 return tool->unthrottle(tool, event, sample, machine);
Adrian Hunter4a96f7a2015-04-30 17:37:29 +03001430 case PERF_RECORD_AUX:
Alexander Shishkin05a1f472017-03-16 18:41:59 +02001431 if (tool->aux == perf_event__process_aux) {
1432 if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1433 evlist->stats.total_aux_lost += 1;
1434 if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1435 evlist->stats.total_aux_partial += 1;
1436 }
Adrian Hunter4a96f7a2015-04-30 17:37:29 +03001437 return tool->aux(tool, event, sample, machine);
Adrian Hunter0ad21f62015-04-30 17:37:30 +03001438 case PERF_RECORD_ITRACE_START:
1439 return tool->itrace_start(tool, event, sample, machine);
Adrian Hunter02860392015-07-21 12:44:03 +03001440 case PERF_RECORD_SWITCH:
1441 case PERF_RECORD_SWITCH_CPU_WIDE:
1442 return tool->context_switch(tool, event, sample, machine);
Song Liu9aa0bfa2019-01-17 08:15:17 -08001443 case PERF_RECORD_KSYMBOL:
1444 return tool->ksymbol(tool, event, sample, machine);
Song Liu45178a92019-01-17 08:15:18 -08001445 case PERF_RECORD_BPF_EVENT:
1446 return tool->bpf_event(tool, event, sample, machine);
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001447 default:
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001448 ++evlist->stats.nr_unknown_events;
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001449 return -1;
1450 }
1451}
1452
Adrian Hunterc4468702015-04-09 18:53:48 +03001453static int perf_session__deliver_event(struct perf_session *session,
1454 union perf_event *event,
Adrian Hunterc4468702015-04-09 18:53:48 +03001455 struct perf_tool *tool,
1456 u64 file_offset)
1457{
Jiri Olsa93d10af2017-08-03 13:21:14 +02001458 struct perf_sample sample;
Adrian Hunterc4468702015-04-09 18:53:48 +03001459 int ret;
1460
Jiri Olsa93d10af2017-08-03 13:21:14 +02001461 ret = perf_evlist__parse_sample(session->evlist, event, &sample);
1462 if (ret) {
1463 pr_err("Can't parse sample, err = %d\n", ret);
1464 return ret;
1465 }
1466
1467 ret = auxtrace__process_event(session, event, &sample, tool);
Adrian Hunterc4468702015-04-09 18:53:48 +03001468 if (ret < 0)
1469 return ret;
1470 if (ret > 0)
1471 return 0;
1472
1473 return machines__deliver_event(&session->machines, session->evlist,
Jiri Olsa93d10af2017-08-03 13:21:14 +02001474 event, &sample, tool, file_offset);
Adrian Hunterc4468702015-04-09 18:53:48 +03001475}
1476
Adrian Hunterd5652d82014-07-23 22:19:58 +03001477static s64 perf_session__process_user_event(struct perf_session *session,
1478 union perf_event *event,
Adrian Hunterd5652d82014-07-23 22:19:58 +03001479 u64 file_offset)
Thomas Gleixnerba74f062010-12-07 12:49:01 +00001480{
Arnaldo Carvalho de Melod704ebd2015-03-03 12:37:54 -03001481 struct ordered_events *oe = &session->ordered_events;
Arnaldo Carvalho de Melo9870d782015-03-31 12:48:16 -03001482 struct perf_tool *tool = session->tool;
Arnaldo Carvalho de Melof250b092017-11-23 15:35:04 -03001483 struct perf_sample sample = { .time = 0, };
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001484 int fd = perf_data__fd(session->data);
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02001485 int err;
1486
Alexey Budankov61a77732019-03-18 20:45:11 +03001487 if (event->header.type != PERF_RECORD_COMPRESSED ||
1488 tool->compressed == perf_session__process_compressed_event_stub)
1489 dump_event(session->evlist, event, file_offset, &sample);
Thomas Gleixnerba74f062010-12-07 12:49:01 +00001490
1491 /* These events are processed right away */
1492 switch (event->header.type) {
1493 case PERF_RECORD_HEADER_ATTR:
Adrian Hunter47c3d102013-07-04 16:20:21 +03001494 err = tool->attr(tool, event, &session->evlist);
Adrian Huntercfe1c412014-07-31 09:00:45 +03001495 if (err == 0) {
Arnaldo Carvalho de Melo7b56cce2012-08-01 19:31:00 -03001496 perf_session__set_id_hdr_size(session);
Adrian Huntercfe1c412014-07-31 09:00:45 +03001497 perf_session__set_comm_exec(session);
1498 }
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02001499 return err;
Jiri Olsaffe777252015-10-25 15:51:36 +01001500 case PERF_RECORD_EVENT_UPDATE:
1501 return tool->event_update(tool, event, &session->evlist);
Jiri Olsaf67697b2014-02-04 15:37:48 +01001502 case PERF_RECORD_HEADER_EVENT_TYPE:
1503 /*
 1504 * Deprecated, but we need to handle it for the sake
 1505 * of old data files created in pipe mode.
1506 */
1507 return 0;
Thomas Gleixnerba74f062010-12-07 12:49:01 +00001508 case PERF_RECORD_HEADER_TRACING_DATA:
1509 /* setup for reading amidst mmap */
Jiri Olsacc9784bd2013-10-15 16:27:34 +02001510 lseek(fd, file_offset, SEEK_SET);
Jiri Olsa89f16882018-09-13 14:54:03 +02001511 return tool->tracing_data(session, event);
Thomas Gleixnerba74f062010-12-07 12:49:01 +00001512 case PERF_RECORD_HEADER_BUILD_ID:
Jiri Olsa89f16882018-09-13 14:54:03 +02001513 return tool->build_id(session, event);
Thomas Gleixnerba74f062010-12-07 12:49:01 +00001514 case PERF_RECORD_FINISHED_ROUND:
Arnaldo Carvalho de Melod704ebd2015-03-03 12:37:54 -03001515 return tool->finished_round(tool, event, oe);
Adrian Hunter3c659ee2014-10-27 15:49:22 +02001516 case PERF_RECORD_ID_INDEX:
Jiri Olsa89f16882018-09-13 14:54:03 +02001517 return tool->id_index(session, event);
Adrian Huntera16ac022015-04-09 18:53:43 +03001518 case PERF_RECORD_AUXTRACE_INFO:
Jiri Olsa89f16882018-09-13 14:54:03 +02001519 return tool->auxtrace_info(session, event);
Adrian Huntera16ac022015-04-09 18:53:43 +03001520 case PERF_RECORD_AUXTRACE:
1521 /* setup for reading amidst mmap */
1522 lseek(fd, file_offset + event->header.size, SEEK_SET);
Jiri Olsa73365552018-09-13 14:54:04 +02001523 return tool->auxtrace(session, event);
Adrian Huntere9bf54d2015-04-09 18:53:47 +03001524 case PERF_RECORD_AUXTRACE_ERROR:
Adrian Hunter85ed4722015-04-09 18:53:50 +03001525 perf_session__auxtrace_error_inc(session, event);
Jiri Olsa89f16882018-09-13 14:54:03 +02001526 return tool->auxtrace_error(session, event);
Jiri Olsa5f3339d2015-10-25 15:51:19 +01001527 case PERF_RECORD_THREAD_MAP:
Jiri Olsa89f16882018-09-13 14:54:03 +02001528 return tool->thread_map(session, event);
Jiri Olsa6640b6c2015-10-25 15:51:23 +01001529 case PERF_RECORD_CPU_MAP:
Jiri Olsa89f16882018-09-13 14:54:03 +02001530 return tool->cpu_map(session, event);
Jiri Olsa374fb9e2015-10-25 15:51:27 +01001531 case PERF_RECORD_STAT_CONFIG:
Jiri Olsa89f16882018-09-13 14:54:03 +02001532 return tool->stat_config(session, event);
Jiri Olsad80518c2015-10-25 15:51:30 +01001533 case PERF_RECORD_STAT:
Jiri Olsa89f16882018-09-13 14:54:03 +02001534 return tool->stat(session, event);
Jiri Olsa2d8f0f12015-10-25 15:51:33 +01001535 case PERF_RECORD_STAT_ROUND:
Jiri Olsa89f16882018-09-13 14:54:03 +02001536 return tool->stat_round(session, event);
Adrian Hunter46bc29b2016-03-08 10:38:44 +02001537 case PERF_RECORD_TIME_CONV:
1538 session->time_conv = event->time_conv;
Jiri Olsa89f16882018-09-13 14:54:03 +02001539 return tool->time_conv(session, event);
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07001540 case PERF_RECORD_HEADER_FEATURE:
Jiri Olsa89f16882018-09-13 14:54:03 +02001541 return tool->feature(session, event);
Alexey Budankov61a77732019-03-18 20:45:11 +03001542 case PERF_RECORD_COMPRESSED:
1543 err = tool->compressed(session, event, file_offset);
1544 if (err)
1545 dump_event(session->evlist, event, file_offset, &sample);
1546 return err;
Thomas Gleixnerba74f062010-12-07 12:49:01 +00001547 default:
1548 return -EINVAL;
1549 }
1550}
1551
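/*
 * Deliver a synthesized event directly, bypassing the ordered-events queue:
 * user-space event types go through the user-event path, everything else
 * straight to machines__deliver_event with a zero file offset.
 */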
Adrian Huntera2938292014-10-27 15:49:23 +02001552int perf_session__deliver_synth_event(struct perf_session *session,
1553 union perf_event *event,
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001554 struct perf_sample *sample)
Adrian Huntera2938292014-10-27 15:49:23 +02001555{
Arnaldo Carvalho de Melofa713a4e2015-03-03 11:48:12 -03001556 struct perf_evlist *evlist = session->evlist;
Arnaldo Carvalho de Melo9870d782015-03-31 12:48:16 -03001557 struct perf_tool *tool = session->tool;
Arnaldo Carvalho de Melofa713a4e2015-03-03 11:48:12 -03001558
1559 events_stats__inc(&evlist->stats, event->header.type);
Adrian Huntera2938292014-10-27 15:49:23 +02001560
1561 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001562 return perf_session__process_user_event(session, event, 0);
Adrian Huntera2938292014-10-27 15:49:23 +02001563
Arnaldo Carvalho de Melofa713a4e2015-03-03 11:48:12 -03001564 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
Adrian Huntera2938292014-10-27 15:49:23 +02001565}
1566
Jiri Olsa268fb202012-05-30 14:23:43 +02001567static void event_swap(union perf_event *event, bool sample_id_all)
1568{
1569 perf_event__swap_op swap;
1570
1571 swap = perf_event__swap_ops[event->header.type];
1572 if (swap)
1573 swap(event, sample_id_all);
1574}
1575
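/*
 * Read the event at file_offset without disturbing the main processing
 * position: use the single mmap directly when the whole file is mapped,
 * otherwise seek and read the header plus payload into the caller-supplied
 * buffer (not possible for pipe input). The event is byte-swapped if
 * needed and the sample optionally parsed.
 */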
Adrian Hunter5a52f332014-07-31 09:00:57 +03001576int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1577 void *buf, size_t buf_sz,
1578 union perf_event **event_ptr,
1579 struct perf_sample *sample)
1580{
1581 union perf_event *event;
1582 size_t hdr_sz, rest;
1583 int fd;
1584
1585 if (session->one_mmap && !session->header.needs_swap) {
1586 event = file_offset - session->one_mmap_offset +
1587 session->one_mmap_addr;
1588 goto out_parse_sample;
1589 }
1590
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001591 if (perf_data__is_pipe(session->data))
Adrian Hunter5a52f332014-07-31 09:00:57 +03001592 return -1;
1593
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001594 fd = perf_data__fd(session->data);
Adrian Hunter5a52f332014-07-31 09:00:57 +03001595 hdr_sz = sizeof(struct perf_event_header);
1596
1597 if (buf_sz < hdr_sz)
1598 return -1;
1599
1600 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
Adrian Hunter554e92e2015-05-19 16:05:45 +03001601 readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
Adrian Hunter5a52f332014-07-31 09:00:57 +03001602 return -1;
1603
1604 event = (union perf_event *)buf;
1605
1606 if (session->header.needs_swap)
1607 perf_event_header__bswap(&event->header);
1608
Adrian Hunter554e92e2015-05-19 16:05:45 +03001609 if (event->header.size < hdr_sz || event->header.size > buf_sz)
Adrian Hunter5a52f332014-07-31 09:00:57 +03001610 return -1;
1611
1612 rest = event->header.size - hdr_sz;
1613
Adrian Hunter554e92e2015-05-19 16:05:45 +03001614 if (readn(fd, buf, rest) != (ssize_t)rest)
Adrian Hunter5a52f332014-07-31 09:00:57 +03001615 return -1;
1616
1617 if (session->header.needs_swap)
1618 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1619
1620out_parse_sample:
1621
1622 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1623 perf_evlist__parse_sample(session->evlist, event, sample))
1624 return -1;
1625
1626 *event_ptr = event;
1627
1628 return 0;
1629}
1630
Adrian Hunterd5652d82014-07-23 22:19:58 +03001631static s64 perf_session__process_event(struct perf_session *session,
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001632 union perf_event *event, u64 file_offset)
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001633{
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001634 struct perf_evlist *evlist = session->evlist;
Arnaldo Carvalho de Melo9870d782015-03-31 12:48:16 -03001635 struct perf_tool *tool = session->tool;
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001636 int ret;
Arnaldo Carvalho de Melo640c03c2010-12-02 14:10:21 -02001637
Jiri Olsa268fb202012-05-30 14:23:43 +02001638 if (session->header.needs_swap)
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001639 event_swap(event, perf_evlist__sample_id_all(evlist));
Arnaldo Carvalho de Melo640c03c2010-12-02 14:10:21 -02001640
Thomas Gleixner9aefcab02010-12-07 12:48:47 +00001641 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1642 return -EINVAL;
Arnaldo Carvalho de Melo640c03c2010-12-02 14:10:21 -02001643
Arnaldo Carvalho de Melo313e53b2015-02-14 15:05:28 -03001644 events_stats__inc(&evlist->stats, event->header.type);
Thomas Gleixner9aefcab02010-12-07 12:48:47 +00001645
1646 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001647 return perf_session__process_user_event(session, event, file_offset);
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001648
Jiri Olsa0a8cb852014-07-06 14:18:21 +02001649 if (tool->ordered_events) {
Mathieu Poirier631e8f02018-01-10 13:31:52 -07001650 u64 timestamp = -1ULL;
Jiri Olsa93d10af2017-08-03 13:21:14 +02001651
1652 ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
Mathieu Poirier631e8f02018-01-10 13:31:52 -07001653 if (ret && ret != -1)
Jiri Olsa93d10af2017-08-03 13:21:14 +02001654 return ret;
1655
1656 ret = perf_session__queue_event(session, event, timestamp, file_offset);
Thomas Gleixnercbf41642010-12-05 14:32:55 +01001657 if (ret != -ETIME)
1658 return ret;
1659 }
1660
Jiri Olsa93d10af2017-08-03 13:21:14 +02001661 return perf_session__deliver_event(session, event, tool, file_offset);
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001662}
1663
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03001664void perf_event_header__bswap(struct perf_event_header *hdr)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02001665{
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03001666 hdr->type = bswap_32(hdr->type);
1667 hdr->misc = bswap_16(hdr->misc);
1668 hdr->size = bswap_16(hdr->size);
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02001669}
1670
Arnaldo Carvalho de Melob424eba2011-11-09 13:24:25 -02001671struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1672{
Adrian Hunter1fcb8762014-07-14 13:02:25 +03001673 return machine__findnew_thread(&session->machines.host, -1, pid);
Arnaldo Carvalho de Melob424eba2011-11-09 13:24:25 -02001674}
1675
Adrian Hunterb25756d2018-12-21 14:06:20 +02001676/*
1677 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1678 * So here a single thread is created for that, but actually there is a separate
1679 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
1680 * is only 1. That causes problems for some tools, requiring workarounds. For
1681 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
1682 */
Masami Hiramatsu9d8b1722015-12-09 11:11:23 +09001683int perf_session__register_idle_thread(struct perf_session *session)
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001684{
Adrian Hunter1fcb8762014-07-14 13:02:25 +03001685 struct thread *thread;
Masami Hiramatsu9d8b1722015-12-09 11:11:23 +09001686 int err = 0;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001687
Adrian Hunter1fcb8762014-07-14 13:02:25 +03001688 thread = machine__findnew_thread(&session->machines.host, 0, 0);
Frederic Weisbecker162f0be2013-09-11 16:18:24 +02001689 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001690 pr_err("problem inserting idle task.\n");
Masami Hiramatsu9d8b1722015-12-09 11:11:23 +09001691 err = -1;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001692 }
1693
Hari Bathinif3b36142017-03-08 02:11:43 +05301694 if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1695 pr_err("problem inserting idle task.\n");
1696 err = -1;
1697 }
1698
Masami Hiramatsu9d8b1722015-12-09 11:11:23 +09001699 /* machine__findnew_thread() got the thread, so put it */
1700 thread__put(thread);
1701 return err;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001702}
1703
Wang Nanf06149c2016-07-14 08:34:46 +00001704static void
1705perf_session__warn_order(const struct perf_session *session)
1706{
1707 const struct ordered_events *oe = &session->ordered_events;
1708 struct perf_evsel *evsel;
1709 bool should_warn = true;
1710
1711 evlist__for_each_entry(session->evlist, evsel) {
1712 if (evsel->attr.write_backward)
1713 should_warn = false;
1714 }
1715
1716 if (!should_warn)
1717 return;
1718 if (oe->nr_unordered_events != 0)
1719 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1720}
1721
Arnaldo Carvalho de Melo9870d782015-03-31 12:48:16 -03001722static void perf_session__warn_about_errors(const struct perf_session *session)
Arnaldo Carvalho de Melo11095992011-01-04 16:25:15 -02001723{
Arnaldo Carvalho de Melo9870d782015-03-31 12:48:16 -03001724 const struct events_stats *stats = &session->evlist->stats;
Arnaldo Carvalho de Melo9870d782015-03-31 12:48:16 -03001725
1726 if (session->tool->lost == perf_event__process_lost &&
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001727 stats->nr_events[PERF_RECORD_LOST] != 0) {
Arnaldo Carvalho de Melo7b275092011-10-29 12:15:04 -02001728 ui__warning("Processed %d events and lost %d chunks!\n\n"
1729 "Check IO/CPU overload!\n\n",
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001730 stats->nr_events[0],
1731 stats->nr_events[PERF_RECORD_LOST]);
Arnaldo Carvalho de Melo11095992011-01-04 16:25:15 -02001732 }
1733
Kan Liangc4937a92015-05-10 15:13:15 -04001734 if (session->tool->lost_samples == perf_event__process_lost_samples) {
1735 double drop_rate;
1736
1737 drop_rate = (double)stats->total_lost_samples /
1738 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1739 if (drop_rate > 0.05) {
Arnaldo Carvalho de Melo41a43da2018-04-05 14:34:09 -03001740 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
Kan Liangc4937a92015-05-10 15:13:15 -04001741 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1742 drop_rate * 100.0);
1743 }
1744 }
1745
Adrian Huntera38f48e2015-09-25 16:15:37 +03001746 if (session->tool->aux == perf_event__process_aux &&
1747 stats->total_aux_lost != 0) {
1748 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1749 stats->total_aux_lost,
1750 stats->nr_events[PERF_RECORD_AUX]);
1751 }
1752
Alexander Shishkin05a1f472017-03-16 18:41:59 +02001753 if (session->tool->aux == perf_event__process_aux &&
1754 stats->total_aux_partial != 0) {
1755 bool vmm_exclusive = false;
1756
1757 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1758 &vmm_exclusive);
1759
1760 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1761 "Are you running a KVM guest in the background?%s\n\n",
1762 stats->total_aux_partial,
1763 stats->nr_events[PERF_RECORD_AUX],
 1764 vmm_exclusive ?
 1765 "\nReloading kvm_intel module with vmm_exclusive=0\n"
 1766 "will reduce the gaps to only the guest's timeslices." :
1767 "");
1768 }
1769
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001770 if (stats->nr_unknown_events != 0) {
Arnaldo Carvalho de Melo11095992011-01-04 16:25:15 -02001771 ui__warning("Found %u unknown events!\n\n"
1772 "Is this an older tool processing a perf.data "
1773 "file generated by a more recent tool?\n\n"
1774 "If that is not the case, consider "
1775 "reporting to linux-kernel@vger.kernel.org.\n\n",
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001776 stats->nr_unknown_events);
Arnaldo Carvalho de Melo11095992011-01-04 16:25:15 -02001777 }
1778
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001779 if (stats->nr_unknown_id != 0) {
Arnaldo Carvalho de Melo9e69c212011-03-15 15:44:01 -03001780 ui__warning("%u samples with id not present in the header\n",
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001781 stats->nr_unknown_id);
Arnaldo Carvalho de Melo9e69c212011-03-15 15:44:01 -03001782 }
1783
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001784 if (stats->nr_invalid_chains != 0) {
Arnaldo Carvalho de Melo75be9892015-02-14 14:50:11 -03001785 ui__warning("Found invalid callchains!\n\n"
1786 "%u out of %u events were discarded for this reason.\n\n"
1787 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001788 stats->nr_invalid_chains,
1789 stats->nr_events[PERF_RECORD_SAMPLE]);
Arnaldo Carvalho de Melo75be9892015-02-14 14:50:11 -03001790 }
Joerg Roedel0c095712012-02-10 18:05:04 +01001791
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001792 if (stats->nr_unprocessable_samples != 0) {
Joerg Roedel0c095712012-02-10 18:05:04 +01001793 ui__warning("%u unprocessable samples recorded.\n"
1794 "Do you have a KVM guest running and not using 'perf kvm'?\n",
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001795 stats->nr_unprocessable_samples);
Joerg Roedel0c095712012-02-10 18:05:04 +01001796 }
Jiri Olsaf61ff6c2014-11-26 16:39:31 +01001797
Wang Nanf06149c2016-07-14 08:34:46 +00001798 perf_session__warn_order(session);
Adrian Hunter85ed4722015-04-09 18:53:50 +03001799
1800 events_stats__auxtrace_error_warn(stats);
Kan Liang930e6fc2015-06-17 09:51:10 -04001801
1802 if (stats->nr_proc_map_timeout != 0) {
 1803 ui__warning("%d map information files for pre-existing threads were\n"
 1804 "not processed; if there are samples for those addresses they\n"
 1805 "will not be resolved. You may find out which threads these\n"
 1806 "are by running with -v and redirecting the output\n"
Kan Liang9d9cad72015-06-17 09:51:11 -04001807 "to a file.\n"
 1808 "Is the time limit to process the proc map too short?\n"
 1809 "Increase it with --proc-map-timeout\n",
Kan Liang930e6fc2015-06-17 09:51:10 -04001810 stats->nr_proc_map_timeout);
1811 }
Arnaldo Carvalho de Melo11095992011-01-04 16:25:15 -02001812}
1813
Adrian Huntera5499b32015-05-29 16:33:30 +03001814static int perf_session__flush_thread_stack(struct thread *thread,
1815 void *p __maybe_unused)
1816{
1817 return thread_stack__flush(thread);
1818}
1819
1820static int perf_session__flush_thread_stacks(struct perf_session *session)
1821{
1822 return machines__for_each_thread(&session->machines,
1823 perf_session__flush_thread_stack,
1824 NULL);
1825}
1826
Tom Zanussi8dc58102010-04-01 23:59:15 -05001827volatile int session_done;
1828
Alexey Budankovcb62c6f2019-03-18 20:45:11 +03001829static int __perf_session__process_decomp_events(struct perf_session *session);
1830
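/*
 * Pipe input cannot be mmapped, so read one event at a time: the header
 * first, then the payload, into a heap buffer that is grown to fit the
 * largest event seen so far.
 */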
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001831static int __perf_session__process_pipe_events(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05001832{
Arnaldo Carvalho de Melofa713a4e2015-03-03 11:48:12 -03001833 struct ordered_events *oe = &session->ordered_events;
Arnaldo Carvalho de Melo9870d782015-03-31 12:48:16 -03001834 struct perf_tool *tool = session->tool;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01001835 int fd = perf_data__fd(session->data);
Stephane Eranian444d2862012-05-15 13:28:12 +02001836 union perf_event *event;
1837 uint32_t size, cur_size = 0;
1838 void *buf = NULL;
Adrian Hunterd5652d82014-07-23 22:19:58 +03001839 s64 skip = 0;
Tom Zanussi8dc58102010-04-01 23:59:15 -05001840 u64 head;
Jiri Olsa727ebd52013-11-28 11:30:14 +01001841 ssize_t err;
Tom Zanussi8dc58102010-04-01 23:59:15 -05001842 void *p;
1843
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001844 perf_tool__fill_defaults(tool);
Tom Zanussi8dc58102010-04-01 23:59:15 -05001845
1846 head = 0;
Stephane Eranian444d2862012-05-15 13:28:12 +02001847 cur_size = sizeof(union perf_event);
1848
1849 buf = malloc(cur_size);
1850 if (!buf)
1851 return -errno;
David Carrillo-Cisneros1e0d4f02017-04-10 13:14:27 -07001852 ordered_events__set_copy_on_queue(oe, true);
Tom Zanussi8dc58102010-04-01 23:59:15 -05001853more:
Stephane Eranian444d2862012-05-15 13:28:12 +02001854 event = buf;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02001855 err = readn(fd, event, sizeof(struct perf_event_header));
Tom Zanussi8dc58102010-04-01 23:59:15 -05001856 if (err <= 0) {
1857 if (err == 0)
1858 goto done;
1859
1860 pr_err("failed to read event header\n");
1861 goto out_err;
1862 }
1863
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03001864 if (session->header.needs_swap)
Stephane Eranian444d2862012-05-15 13:28:12 +02001865 perf_event_header__bswap(&event->header);
Tom Zanussi8dc58102010-04-01 23:59:15 -05001866
Stephane Eranian444d2862012-05-15 13:28:12 +02001867 size = event->header.size;
Adrian Hunter27389d72013-07-04 16:20:27 +03001868 if (size < sizeof(struct perf_event_header)) {
1869 pr_err("bad event header size\n");
1870 goto out_err;
1871 }
Tom Zanussi8dc58102010-04-01 23:59:15 -05001872
Stephane Eranian444d2862012-05-15 13:28:12 +02001873 if (size > cur_size) {
1874 void *new = realloc(buf, size);
1875 if (!new) {
1876 pr_err("failed to allocate memory to read event\n");
1877 goto out_err;
1878 }
1879 buf = new;
1880 cur_size = size;
1881 event = buf;
1882 }
1883 p = event;
Tom Zanussi8dc58102010-04-01 23:59:15 -05001884 p += sizeof(struct perf_event_header);
1885
Tom Zanussi794e43b2010-05-05 00:27:40 -05001886 if (size - sizeof(struct perf_event_header)) {
Jiri Olsacc9784bd2013-10-15 16:27:34 +02001887 err = readn(fd, p, size - sizeof(struct perf_event_header));
Tom Zanussi794e43b2010-05-05 00:27:40 -05001888 if (err <= 0) {
1889 if (err == 0) {
1890 pr_err("unexpected end of event stream\n");
1891 goto done;
1892 }
Tom Zanussi8dc58102010-04-01 23:59:15 -05001893
Tom Zanussi794e43b2010-05-05 00:27:40 -05001894 pr_err("failed to read event data\n");
1895 goto out_err;
1896 }
Tom Zanussi8dc58102010-04-01 23:59:15 -05001897 }
1898
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001899 if ((skip = perf_session__process_event(session, event, head)) < 0) {
Jiri Olsa9389a462012-04-16 20:42:51 +02001900 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
Stephane Eranian444d2862012-05-15 13:28:12 +02001901 head, event->header.size, event->header.type);
Jiri Olsa9389a462012-04-16 20:42:51 +02001902 err = -EINVAL;
1903 goto out_err;
Tom Zanussi8dc58102010-04-01 23:59:15 -05001904 }
1905
1906 head += size;
1907
Tom Zanussi8dc58102010-04-01 23:59:15 -05001908 if (skip > 0)
1909 head += skip;
1910
Alexey Budankovcb62c6f2019-03-18 20:45:11 +03001911 err = __perf_session__process_decomp_events(session);
1912 if (err)
1913 goto out_err;
1914
Tom Zanussi8dc58102010-04-01 23:59:15 -05001915 if (!session_done())
1916 goto more;
1917done:
Adrian Hunter8c16b642013-10-18 15:29:02 +03001918 /* do the final flush for ordered samples */
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001919 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
Adrian Hunterc4468702015-04-09 18:53:48 +03001920 if (err)
1921 goto out_err;
1922 err = auxtrace__flush_events(session, tool);
Adrian Huntera5499b32015-05-29 16:33:30 +03001923 if (err)
1924 goto out_err;
1925 err = perf_session__flush_thread_stacks(session);
Tom Zanussi8dc58102010-04-01 23:59:15 -05001926out_err:
Stephane Eranian444d2862012-05-15 13:28:12 +02001927 free(buf);
Jiri Olsa075ca1e2018-01-07 17:03:54 +01001928 if (!tool->no_warn)
1929 perf_session__warn_about_errors(session);
Jiri Olsaadc56ed2014-06-10 22:50:03 +02001930 ordered_events__free(&session->ordered_events);
Adrian Hunterc4468702015-04-09 18:53:48 +03001931 auxtrace__free_events(session);
Tom Zanussi8dc58102010-04-01 23:59:15 -05001932 return err;
1933}
1934
Frederic Weisbecker998bedc2011-05-23 13:06:28 +02001935static union perf_event *
1936fetch_mmaped_event(struct perf_session *session,
1937 u64 head, size_t mmap_size, char *buf)
1938{
1939 union perf_event *event;
1940
1941 /*
1942 * Ensure we have enough space remaining to read
1943 * the size of the event in the headers.
1944 */
1945 if (head + sizeof(event->header) > mmap_size)
1946 return NULL;
1947
1948 event = (union perf_event *)(buf + head);
1949
1950 if (session->header.needs_swap)
1951 perf_event_header__bswap(&event->header);
1952
Adrian Hunter27389d72013-07-04 16:20:27 +03001953 if (head + event->header.size > mmap_size) {
1954 /* We're not fetching the event so swap back again */
1955 if (session->header.needs_swap)
1956 perf_event_header__bswap(&event->header);
Frederic Weisbecker998bedc2011-05-23 13:06:28 +02001957 return NULL;
Adrian Hunter27389d72013-07-04 16:20:27 +03001958 }
Frederic Weisbecker998bedc2011-05-23 13:06:28 +02001959
1960 return event;
1961}
1962
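/*
 * Process every complete event sitting in the most recently decompressed
 * buffer; decomp->head tracks how far into that buffer we have consumed.
 */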
Alexey Budankovcb62c6f2019-03-18 20:45:11 +03001963static int __perf_session__process_decomp_events(struct perf_session *session)
1964{
1965 s64 skip;
1966 u64 size, file_pos = 0;
1967 struct decomp *decomp = session->decomp_last;
1968
1969 if (!decomp)
1970 return 0;
1971
1972 while (decomp->head < decomp->size && !session_done()) {
1973 union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
1974
1975 if (!event)
1976 break;
1977
1978 size = event->header.size;
1979
1980 if (size < sizeof(struct perf_event_header) ||
1981 (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1982 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1983 decomp->file_pos + decomp->head, event->header.size, event->header.type);
1984 return -EINVAL;
1985 }
1986
1987 if (skip)
1988 size += skip;
1989
1990 decomp->head += size;
1991 }
1992
1993 return 0;
1994}
1995
David Miller35d48dd2012-11-10 14:12:19 -05001996/*
1997 * On 64bit we can mmap the data file in one go. No need for tiny mmap
1998 * slices. On 32bit we use 32MB.
1999 */
2000#if BITS_PER_LONG == 64
2001#define MMAP_SIZE ULLONG_MAX
2002#define NUM_MMAPS 1
2003#else
2004#define MMAP_SIZE (32 * 1024 * 1024ULL)
2005#define NUM_MMAPS 128
2006#endif
2007
Jiri Olsae51f8062019-03-08 14:47:40 +01002008struct reader;
2009
2010typedef s64 (*reader_cb_t)(struct perf_session *session,
2011 union perf_event *event,
2012 u64 file_offset);
2013
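/*
 * A reader describes one chunk of perf.data to walk: the fd to mmap, the
 * offset and size of the data area, and the callback invoked for every
 * event found in it.
 */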
Jiri Olsa82715eb2019-01-10 11:12:58 +01002014struct reader {
Jiri Olsae51f8062019-03-08 14:47:40 +01002015 int fd;
2016 u64 data_size;
2017 u64 data_offset;
2018 reader_cb_t process;
Jiri Olsa82715eb2019-01-10 11:12:58 +01002019};
2020
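/*
 * mmap the data area in mmap_size-sized slices (the whole file at once on
 * 64-bit) and walk the events in each slice, remapping from the current
 * head whenever the next event would cross the end of the mapping.
 */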
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002021static int
2022reader__process_events(struct reader *rd, struct perf_session *session,
2023 struct ui_progress *prog)
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002024{
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002025 u64 data_size = rd->data_size;
Adrian Hunterd5652d82014-07-23 22:19:58 +03002026 u64 head, page_offset, file_offset, file_pos, size;
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002027 int err = 0, mmap_prot, mmap_flags, map_idx = 0;
Arnaldo Carvalho de Melo0c1fe6b2012-10-06 14:57:10 -03002028 size_t mmap_size;
David Miller35d48dd2012-11-10 14:12:19 -05002029 char *buf, *mmaps[NUM_MMAPS];
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02002030 union perf_event *event;
Adrian Hunterd5652d82014-07-23 22:19:58 +03002031 s64 skip;
Thomas Gleixner0331ee02010-11-30 17:49:38 +00002032
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002033 page_offset = page_size * (rd->data_offset / page_size);
Thomas Gleixner0331ee02010-11-30 17:49:38 +00002034 file_offset = page_offset;
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002035 head = rd->data_offset - page_offset;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002036
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002037 ui_progress__init_size(prog, data_size, "Processing events...");
Mark Rutland381c02f2015-09-16 18:18:49 +01002038
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002039 data_size += rd->data_offset;
Thomas Gleixner55b446292010-11-30 17:49:46 +00002040
David Miller35d48dd2012-11-10 14:12:19 -05002041 mmap_size = MMAP_SIZE;
Jiri Olsa4f5a4732019-01-10 11:12:57 +01002042 if (mmap_size > data_size) {
2043 mmap_size = data_size;
Adrian Hunter919d86d2014-07-14 13:02:51 +03002044 session->one_mmap = true;
2045 }
Thomas Gleixner55b446292010-11-30 17:49:46 +00002046
Thomas Gleixnerfe174202010-11-30 17:49:49 +00002047 memset(mmaps, 0, sizeof(mmaps));
2048
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002049 mmap_prot = PROT_READ;
2050 mmap_flags = MAP_SHARED;
2051
Thomas Gleixner0331ee02010-11-30 17:49:38 +00002052 if (session->header.needs_swap) {
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002053 mmap_prot |= PROT_WRITE;
2054 mmap_flags = MAP_PRIVATE;
2055 }
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002056remap:
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002057 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
Thomas Gleixner55b446292010-11-30 17:49:46 +00002058 file_offset);
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002059 if (buf == MAP_FAILED) {
2060 pr_err("failed to mmap file\n");
2061 err = -errno;
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002062 goto out;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002063 }
Thomas Gleixnerfe174202010-11-30 17:49:49 +00002064 mmaps[map_idx] = buf;
2065 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
Thomas Gleixnerd6513282010-11-30 17:49:44 +00002066 file_pos = file_offset + head;
Adrian Hunter919d86d2014-07-14 13:02:51 +03002067 if (session->one_mmap) {
2068 session->one_mmap_addr = buf;
2069 session->one_mmap_offset = file_offset;
2070 }
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002071
2072more:
Frederic Weisbecker998bedc2011-05-23 13:06:28 +02002073 event = fetch_mmaped_event(session, head, mmap_size, buf);
2074 if (!event) {
Thomas Gleixnerfe174202010-11-30 17:49:49 +00002075 if (mmaps[map_idx]) {
2076 munmap(mmaps[map_idx], mmap_size);
2077 mmaps[map_idx] = NULL;
2078 }
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002079
Thomas Gleixner0331ee02010-11-30 17:49:38 +00002080 page_offset = page_size * (head / page_size);
2081 file_offset += page_offset;
2082 head -= page_offset;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002083 goto remap;
2084 }
2085
2086 size = event->header.size;
2087
Thomas Richter167e4182019-04-23 12:53:03 +02002088 skip = -EINVAL;
2089
Adrian Hunter27389d72013-07-04 16:20:27 +03002090 if (size < sizeof(struct perf_event_header) ||
Jiri Olsae51f8062019-03-08 14:47:40 +01002091 (skip = rd->process(session, event, file_pos)) < 0) {
Thomas Richter167e4182019-04-23 12:53:03 +02002092 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
Jiri Olsa9389a462012-04-16 20:42:51 +02002093 file_offset + head, event->header.size,
Thomas Richter167e4182019-04-23 12:53:03 +02002094 event->header.type, strerror(-skip));
2095 err = skip;
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002096 goto out;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002097 }
2098
Adrian Hunter6f917c72014-07-23 22:19:57 +03002099 if (skip)
2100 size += skip;
2101
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002102 head += size;
Thomas Gleixnerd6513282010-11-30 17:49:44 +00002103 file_pos += size;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002104
Alexey Budankovcb62c6f2019-03-18 20:45:11 +03002105 err = __perf_session__process_decomp_events(session);
2106 if (err)
2107 goto out;
2108
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002109 ui_progress__update(prog, size);
Thomas Gleixner55b446292010-11-30 17:49:46 +00002110
Arnaldo Carvalho de Melo33e940a2013-09-17 16:34:28 -03002111 if (session_done())
Adrian Hunter8c16b642013-10-18 15:29:02 +03002112 goto out;
Arnaldo Carvalho de Melo33e940a2013-09-17 16:34:28 -03002113
Jiri Olsa4f5a4732019-01-10 11:12:57 +01002114 if (file_pos < data_size)
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002115 goto more;
Thomas Gleixnerd6513282010-11-30 17:49:44 +00002116
Adrian Hunter8c16b642013-10-18 15:29:02 +03002117out:
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002118 return err;
2119}
2120
Jiri Olsae51f8062019-03-08 14:47:40 +01002121static s64 process_simple(struct perf_session *session,
2122 union perf_event *event,
2123 u64 file_offset)
2124{
2125 return perf_session__process_event(session, event, file_offset);
2126}
2127
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002128static int __perf_session__process_events(struct perf_session *session)
2129{
2130 struct reader rd = {
2131 .fd = perf_data__fd(session->data),
2132 .data_size = session->header.data_size,
2133 .data_offset = session->header.data_offset,
Jiri Olsae51f8062019-03-08 14:47:40 +01002134 .process = process_simple,
Jiri Olsa3c7b67b2019-01-10 11:13:01 +01002135 };
2136 struct ordered_events *oe = &session->ordered_events;
2137 struct perf_tool *tool = session->tool;
2138 struct ui_progress prog;
2139 int err;
2140
2141 perf_tool__fill_defaults(tool);
2142
2143 if (rd.data_size == 0)
2144 return -1;
2145
2146 ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2147
2148 err = reader__process_events(&rd, session, &prog);
2149 if (err)
2150 goto out_err;
Frederic Weisbeckerc61e52e2010-04-24 00:04:12 +02002151 /* do the final flush for ordered samples */
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03002152 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
Adrian Hunterc4468702015-04-09 18:53:48 +03002153 if (err)
2154 goto out_err;
2155 err = auxtrace__flush_events(session, tool);
Adrian Huntera5499b32015-05-29 16:33:30 +03002156 if (err)
2157 goto out_err;
2158 err = perf_session__flush_thread_stacks(session);
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002159out_err:
Namhyung Kima5580f32012-11-13 22:30:34 +09002160 ui_progress__finish();
Jiri Olsa075ca1e2018-01-07 17:03:54 +01002161 if (!tool->no_warn)
2162 perf_session__warn_about_errors(session);
Wang Nanb26dc732016-04-13 08:21:04 +00002163 /*
 2164 * We may be switching perf.data output, so make ordered_events
 2165 * reusable.
2166 */
2167 ordered_events__reinit(&session->ordered_events);
Adrian Hunterc4468702015-04-09 18:53:48 +03002168 auxtrace__free_events(session);
Adrian Hunter919d86d2014-07-14 13:02:51 +03002169 session->one_mmap = false;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02002170 return err;
2171}
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -02002172
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03002173int perf_session__process_events(struct perf_session *session)
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -02002174{
Masami Hiramatsu9d8b1722015-12-09 11:11:23 +09002175 if (perf_session__register_idle_thread(session) < 0)
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -02002176 return -ENOMEM;
2177
Jiri Olsa7ba4da12019-01-10 11:12:56 +01002178 if (perf_data__is_pipe(session->data))
2179 return __perf_session__process_pipe_events(session);
Dave Martin88ca8952010-07-27 11:46:12 -03002180
Jiri Olsa7ba4da12019-01-10 11:12:56 +01002181 return __perf_session__process_events(session);
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -02002182}
2183
Arnaldo Carvalho de Melo7f3be652012-08-01 19:15:52 -03002184bool perf_session__has_traces(struct perf_session *session, const char *msg)
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -02002185{
David Ahern93ea01c292013-08-07 22:50:58 -04002186 struct perf_evsel *evsel;
2187
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002188 evlist__for_each_entry(session->evlist, evsel) {
David Ahern93ea01c292013-08-07 22:50:58 -04002189 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
2190 return true;
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -02002191 }
2192
David Ahern93ea01c292013-08-07 22:50:58 -04002193 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2194 return false;
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -02002195}
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02002196
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -03002197int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02002198{
2199 char *bracket;
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08002200 struct ref_reloc_sym *ref;
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -03002201 struct kmap *kmap;
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02002202
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08002203 ref = zalloc(sizeof(struct ref_reloc_sym));
2204 if (ref == NULL)
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02002205 return -ENOMEM;
2206
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08002207 ref->name = strdup(symbol_name);
2208 if (ref->name == NULL) {
2209 free(ref);
2210 return -ENOMEM;
2211 }
2212
2213 bracket = strchr(ref->name, ']');
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02002214 if (bracket)
2215 *bracket = '\0';
2216
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08002217 ref->addr = addr;
Arnaldo Carvalho de Melo9de89fe2010-02-03 16:52:00 -02002218
Arnaldo Carvalho de Melo3183f8c2018-04-26 16:52:34 -03002219 kmap = map__kmap(map);
2220 if (kmap)
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08002221 kmap->ref_reloc_sym = ref;
Arnaldo Carvalho de Melo9de89fe2010-02-03 16:52:00 -02002222
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02002223 return 0;
2224}
Arnaldo Carvalho de Melo1f626bc2010-05-09 19:57:08 -03002225
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03002226size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
Arnaldo Carvalho de Melo1f626bc2010-05-09 19:57:08 -03002227{
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03002228 return machines__fprintf_dsos(&session->machines, fp);
Arnaldo Carvalho de Melo1f626bc2010-05-09 19:57:08 -03002229}
Arnaldo Carvalho de Melof8690972010-05-19 13:41:23 -03002230
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03002231size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
Arnaldo Carvalho de Melo417c2ff2012-12-07 09:53:58 -03002232 bool (skip)(struct dso *dso, int parm), int parm)
Arnaldo Carvalho de Melof8690972010-05-19 13:41:23 -03002233{
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03002234 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
Arnaldo Carvalho de Melof8690972010-05-19 13:41:23 -03002235}
Arnaldo Carvalho de Meloe248de32011-03-05 21:40:06 -03002236
2237size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2238{
Adrian Hunterc4468702015-04-09 18:53:48 +03002239 size_t ret;
2240 const char *msg = "";
2241
2242 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2243 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2244
Adrian Hunterfe692ac2015-06-23 10:52:49 +03002245 ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
Arnaldo Carvalho de Meloe248de32011-03-05 21:40:06 -03002246
Arnaldo Carvalho de Melo75be9892015-02-14 14:50:11 -03002247 ret += events_stats__fprintf(&session->evlist->stats, fp);
Arnaldo Carvalho de Meloe248de32011-03-05 21:40:06 -03002248 return ret;
2249}
David Ahernc0230b22011-03-09 22:23:27 -07002250
Arnaldo Carvalho de Melob424eba2011-11-09 13:24:25 -02002251size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2252{
2253 /*
2254 * FIXME: Here we have to actually print all the machines in this
2255 * session, not just the host...
2256 */
Arnaldo Carvalho de Melo876650e62012-12-18 19:15:48 -03002257 return machine__fprintf(&session->machines.host, fp);
Arnaldo Carvalho de Melob424eba2011-11-09 13:24:25 -02002258}
2259
David Ahern9cbdb702011-04-06 21:54:20 -06002260struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
2261 unsigned int type)
2262{
2263 struct perf_evsel *pos;
2264
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002265 evlist__for_each_entry(session->evlist, pos) {
David Ahern9cbdb702011-04-06 21:54:20 -06002266 if (pos->attr.type == type)
2267 return pos;
2268 }
2269 return NULL;
2270}
2271
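/*
 * Validate a user-supplied cpu list against the session (every sampled
 * event type must carry PERF_SAMPLE_CPU) and set the corresponding bits
 * in cpu_bitmap.
 */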
Anton Blanchard5d67be92011-07-04 21:57:50 +10002272int perf_session__cpu_bitmap(struct perf_session *session,
2273 const char *cpu_list, unsigned long *cpu_bitmap)
2274{
Stanislav Fomichev8bac41c2014-01-20 15:39:39 +04002275 int i, err = -1;
Anton Blanchard5d67be92011-07-04 21:57:50 +10002276 struct cpu_map *map;
2277
2278 for (i = 0; i < PERF_TYPE_MAX; ++i) {
2279 struct perf_evsel *evsel;
2280
2281 evsel = perf_session__find_first_evtype(session, i);
2282 if (!evsel)
2283 continue;
2284
2285 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
2286 pr_err("File does not contain CPU events. "
Adrian Hunter30795462017-05-26 11:17:19 +03002287 "Remove -C option to proceed.\n");
Anton Blanchard5d67be92011-07-04 21:57:50 +10002288 return -1;
2289 }
2290 }
2291
2292 map = cpu_map__new(cpu_list);
David Ahern47fbe532011-11-13 10:45:27 -07002293 if (map == NULL) {
2294 pr_err("Invalid cpu_list\n");
2295 return -1;
2296 }
Anton Blanchard5d67be92011-07-04 21:57:50 +10002297
2298 for (i = 0; i < map->nr; i++) {
2299 int cpu = map->map[i];
2300
2301 if (cpu >= MAX_NR_CPUS) {
2302 pr_err("Requested CPU %d too large. "
2303 "Consider raising MAX_NR_CPUS\n", cpu);
Stanislav Fomichev8bac41c2014-01-20 15:39:39 +04002304 goto out_delete_map;
Anton Blanchard5d67be92011-07-04 21:57:50 +10002305 }
2306
2307 set_bit(cpu, cpu_bitmap);
2308 }
2309
Stanislav Fomichev8bac41c2014-01-20 15:39:39 +04002310 err = 0;
2311
2312out_delete_map:
Jiri Olsaf30a79b2015-06-23 00:36:04 +02002313 cpu_map__put(map);
Stanislav Fomichev8bac41c2014-01-20 15:39:39 +04002314 return err;
Anton Blanchard5d67be92011-07-04 21:57:50 +10002315}
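
/*
 * Illustrative sketch, not part of the original file: this mirrors how a
 * "-C <cpu-list>" style filter is usually wired up.  The tool declares a
 * bitmap sized for MAX_NR_CPUS, lets perf_session__cpu_bitmap() parse and
 * validate the user-supplied list, then tests individual CPUs while
 * processing samples.  The helper name and the "0-3" list are made up for
 * the example.
 */
static int __maybe_unused example__setup_cpu_filter(struct perf_session *session)
{
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
	int err;

	err = perf_session__cpu_bitmap(session, "0-3", cpu_bitmap);
	if (err)
		return err;

	/*
	 * Later, while handling a sample for sample->cpu:
	 *	if (!test_bit(sample->cpu, cpu_bitmap))
	 *		skip the sample;
	 */
	return 0;
}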

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}


int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}
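
/*
 * Illustrative sketch, not part of the original file: a trace-style tool
 * normally fills an array of perf_evsel_str_handler entries that map
 * tracepoint names to its own callbacks (stored opaquely in evsel->handler,
 * as above) and registers them in one call.  The example__* names are
 * hypothetical stand-ins for tool-defined handlers.
 */
static int example__sched_switch_handler(void)
{
	return 0;	/* a real tool would decode the sample here */
}

static int example__sched_wakeup_handler(void)
{
	return 0;
}

static int __maybe_unused example__register_sched_handlers(struct perf_session *session)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch", example__sched_switch_handler },
		{ "sched:sched_wakeup", example__sched_wakeup_handler },
	};

	return __perf_session__set_tracepoints_handlers(session, handlers,
							ARRAY_SIZE(handlers));
}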

int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct perf_evlist *evlist = session->evlist;
	struct id_index_event *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRIu64, e->id);
			fprintf(stdout, "  idx: %"PRIu64, e->idx);
			fprintf(stdout, "  cpu: %"PRId64, e->cpu);
			fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}
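
/*
 * Illustrative sketch, not part of the original file: a reader tool makes
 * the id index take effect by routing PERF_RECORD_ID_INDEX to the function
 * above from its perf_tool callbacks.  The ".id_index" member name is
 * assumed from tool.h; only the relevant field is shown, real tools fill in
 * many more callbacks.
 */
static struct perf_tool example__id_index_tool __maybe_unused = {
	.id_index = perf_event__process_id_index,
};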

int perf_event__synthesize_id_index(struct perf_tool *tool,
				    perf_event__handler_t process,
				    struct perf_evlist *evlist,
				    struct machine *machine)
{
	union perf_event *ev;
	struct perf_evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	/* The event size field is a u16, so cap the entries per event. */
	max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);

	evlist__for_each_entry(evlist, evsel)
		nr += evsel->ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				/* Event is full: emit it and start a new chunk. */
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	/* Emit the final (possibly partial) chunk with the remaining entries. */
	sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}
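
/*
 * Illustrative sketch, not part of the original file: the writing side
 * (e.g. a record-like tool) synthesizes the id index once the evsel ids are
 * set up, passing a callback that appends the synthesized event(s) to its
 * output.  example__write_event() is a hypothetical stand-in following the
 * perf_event__handler_t signature used by "process" above.
 */
static int example__write_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	/* A real tool would write "event" to its perf.data output here. */
	return 0;
}

static int __maybe_unused example__emit_id_index(struct perf_tool *tool,
						 struct perf_evlist *evlist,
						 struct machine *machine)
{
	return perf_event__synthesize_id_index(tool, example__write_event,
					       evlist, machine);
}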