blob: b0c34dda30a0625c7f767103dd2a45f6762b9d29 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -03002#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -03003#include <inttypes.h>
Arnaldo Carvalho de Meloa0675582017-04-17 16:51:59 -03004#include "string2.h"
Arnaldo Carvalho de Melo391e4202017-04-19 18:51:14 -03005#include <sys/param.h>
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02006#include <sys/types.h>
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02007#include <byteswap.h>
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02008#include <unistd.h>
9#include <stdio.h>
10#include <stdlib.h>
Arnaldo Carvalho de Melo03536312017-06-16 12:18:27 -030011#include <linux/compiler.h>
Frederic Weisbecker8671dab2009-11-11 04:51:03 +010012#include <linux/list.h>
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -020013#include <linux/kernel.h>
Robert Richterb1e5a9b2011-12-07 10:02:57 +010014#include <linux/bitops.h>
Arnaldo Carvalho de Melofc6a1722019-06-25 21:33:14 -030015#include <linux/string.h>
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -070016#include <linux/stringify.h>
Arnaldo Carvalho de Melo7f7c5362019-07-04 11:32:27 -030017#include <linux/zalloc.h>
Arnaldo Carvalho de Melo7a8ef4c2017-04-19 20:57:47 -030018#include <sys/stat.h>
Stephane Eranianfbe96f22011-09-30 15:40:40 +020019#include <sys/utsname.h>
Jin Yao60115182017-12-08 21:13:41 +080020#include <linux/time64.h>
Jiri Olsae2091ce2018-03-07 16:50:08 +010021#include <dirent.h>
Song Liu606f9722019-03-11 22:30:43 -070022#include <bpf/libbpf.h>
Jiri Olsa9c3516d2019-07-21 13:24:30 +020023#include <perf/cpumap.h>
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020024
Arnaldo Carvalho de Melo4a3cec82019-08-30 11:11:01 -030025#include "dso.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020026#include "evlist.h"
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -030027#include "evsel.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020028#include "header.h"
Arnaldo Carvalho de Melo98521b32017-04-25 15:45:35 -030029#include "memswap.h"
Frederic Weisbecker03456a12009-10-06 23:36:47 +020030#include "trace-event.h"
Arnaldo Carvalho de Melo301a0b02009-12-13 19:50:25 -020031#include "session.h"
Frederic Weisbecker8671dab2009-11-11 04:51:03 +010032#include "symbol.h"
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +010033#include "debug.h"
Stephane Eranianfbe96f22011-09-30 15:40:40 +020034#include "cpumap.h"
Robert Richter50a96672012-08-16 21:10:24 +020035#include "pmu.h"
Jiri Olsa7dbf4dc2012-09-10 18:50:19 +020036#include "vdso.h"
Namhyung Kima1ae5652012-09-24 17:14:59 +090037#include "strbuf.h"
Jiri Olsaebb296c2012-10-27 23:18:28 +020038#include "build-id.h"
Jiri Olsacc9784bd2013-10-15 16:27:34 +020039#include "data.h"
Jiri Olsa720e98b2016-02-16 16:01:43 +010040#include <api/fs/fs.h>
41#include "asm/bug.h"
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -070042#include "tool.h"
Jin Yao60115182017-12-08 21:13:41 +080043#include "time-utils.h"
Jiri Olsae2091ce2018-03-07 16:50:08 +010044#include "units.h"
Arnaldo Carvalho de Melo2da39f12019-08-27 11:51:18 -030045#include "util.h"
Jiri Olsa5135d5e2019-02-19 10:58:13 +010046#include "cputopo.h"
Song Liu606f9722019-03-11 22:30:43 -070047#include "bpf-event.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020048
Arnaldo Carvalho de Melo3052ba52019-06-25 17:27:31 -030049#include <linux/ctype.h>
Arnaldo Carvalho de Melo3d689ed2017-04-17 16:10:49 -030050
Stephane Eranian73323f52012-02-02 13:54:44 +010051/*
52 * magic2 = "PERFILE2"
53 * must be a numerical value to let the endianness
54 * determine the memory layout. That way we are able
55 * to detect endianness when reading the perf.data file
56 * back.
57 *
58 * we check for legacy (PERFFILE) format.
59 */
60static const char *__perf_magic1 = "PERFFILE";
61static const u64 __perf_magic2 = 0x32454c4946524550ULL;
62static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020063
Stephane Eranian73323f52012-02-02 13:54:44 +010064#define PERF_MAGIC __perf_magic2
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020065
Soramichi AKIYAMAd25ed5d2017-01-17 00:22:37 +090066const char perf_version_string[] = PERF_VERSION;
67
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020068struct perf_file_attr {
Ingo Molnarcdd6c482009-09-21 12:02:48 +020069 struct perf_event_attr attr;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020070 struct perf_file_section ids;
71};
72
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -070073struct feat_fd {
74 struct perf_header *ph;
75 int fd;
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -070076 void *buf; /* Either buf != NULL or fd >= 0 */
David Carrillo-Cisneros62552452017-07-17 21:25:42 -070077 ssize_t offset;
78 size_t size;
Jiri Olsa32dcd022019-07-21 13:23:51 +020079 struct evsel *events;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -070080};
81
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030082void perf_header__set_feat(struct perf_header *header, int feat)
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020083{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030084 set_bit(feat, header->adds_features);
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020085}
86
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030087void perf_header__clear_feat(struct perf_header *header, int feat)
Arnaldo Carvalho de Melobaa2f6c2010-11-26 19:39:15 -020088{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030089 clear_bit(feat, header->adds_features);
Arnaldo Carvalho de Melobaa2f6c2010-11-26 19:39:15 -020090}
91
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030092bool perf_header__has_feat(const struct perf_header *header, int feat)
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020093{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030094 return test_bit(feat, header->adds_features);
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020095}
96
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -070097static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
98{
99 ssize_t ret = writen(ff->fd, buf, size);
100
101 if (ret != (ssize_t)size)
102 return ret < 0 ? (int)ret : -1;
103 return 0;
104}
105
106static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
107{
108 /* struct perf_event_header::size is u16 */
109 const size_t max_size = 0xffff - sizeof(struct perf_event_header);
110 size_t new_size = ff->size;
111 void *addr;
112
113 if (size + ff->offset > max_size)
114 return -E2BIG;
115
116 while (size > (new_size - ff->offset))
117 new_size <<= 1;
118 new_size = min(max_size, new_size);
119
120 if (ff->size < new_size) {
121 addr = realloc(ff->buf, new_size);
122 if (!addr)
123 return -ENOMEM;
124 ff->buf = addr;
125 ff->size = new_size;
126 }
127
128 memcpy(ff->buf + ff->offset, buf, size);
129 ff->offset += size;
130
131 return 0;
132}
133
David Carrillo-Cisneros2ff53652017-07-17 21:25:36 -0700134/* Return: 0 if succeded, -ERR if failed. */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700135int do_write(struct feat_fd *ff, const void *buf, size_t size)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +0200136{
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700137 if (!ff->buf)
138 return __do_write_fd(ff, buf, size);
139 return __do_write_buf(ff, buf, size);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +0200140}
141
David Carrillo-Cisneros2ff53652017-07-17 21:25:36 -0700142/* Return: 0 if succeded, -ERR if failed. */
Jiri Olsae2091ce2018-03-07 16:50:08 +0100143static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
144{
145 u64 *p = (u64 *) set;
146 int i, ret;
147
148 ret = do_write(ff, &size, sizeof(size));
149 if (ret < 0)
150 return ret;
151
152 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
153 ret = do_write(ff, p + i, sizeof(*p));
154 if (ret < 0)
155 return ret;
156 }
157
158 return 0;
159}
160
161/* Return: 0 if succeded, -ERR if failed. */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700162int write_padded(struct feat_fd *ff, const void *bf,
163 size_t count, size_t count_aligned)
Arnaldo Carvalho de Melof92cb242010-01-04 16:19:28 -0200164{
165 static const char zero_buf[NAME_ALIGN];
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700166 int err = do_write(ff, bf, count);
Arnaldo Carvalho de Melof92cb242010-01-04 16:19:28 -0200167
168 if (!err)
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700169 err = do_write(ff, zero_buf, count_aligned - count);
Arnaldo Carvalho de Melof92cb242010-01-04 16:19:28 -0200170
171 return err;
172}
173
/* On-disk size of a length-prefixed, NAME_ALIGN-padded string. */
#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
David Carrillo-Cisneros2ff53652017-07-17 21:25:36 -0700177/* Return: 0 if succeded, -ERR if failed. */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700178static int do_write_string(struct feat_fd *ff, const char *str)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200179{
180 u32 len, olen;
181 int ret;
182
183 olen = strlen(str) + 1;
Irina Tirdea9ac3e482012-09-11 01:15:01 +0300184 len = PERF_ALIGN(olen, NAME_ALIGN);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200185
186 /* write len, incl. \0 */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700187 ret = do_write(ff, &len, sizeof(len));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200188 if (ret < 0)
189 return ret;
190
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700191 return write_padded(ff, str, olen, len);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200192}
193
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700194static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700195{
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700196 ssize_t ret = readn(ff->fd, addr, size);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700197
198 if (ret != size)
199 return ret < 0 ? (int)ret : -1;
200 return 0;
201}
202
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700203static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
204{
205 if (size > (ssize_t)ff->size - ff->offset)
206 return -1;
207
208 memcpy(addr, ff->buf + ff->offset, size);
209 ff->offset += size;
210
211 return 0;
212
213}
214
215static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
216{
217 if (!ff->buf)
218 return __do_read_fd(ff, addr, size);
219 return __do_read_buf(ff, addr, size);
220}
221
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700222static int do_read_u32(struct feat_fd *ff, u32 *addr)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700223{
224 int ret;
225
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700226 ret = __do_read(ff, addr, sizeof(*addr));
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700227 if (ret)
228 return ret;
229
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700230 if (ff->ph->needs_swap)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700231 *addr = bswap_32(*addr);
232 return 0;
233}
234
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700235static int do_read_u64(struct feat_fd *ff, u64 *addr)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700236{
237 int ret;
238
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700239 ret = __do_read(ff, addr, sizeof(*addr));
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700240 if (ret)
241 return ret;
242
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700243 if (ff->ph->needs_swap)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700244 *addr = bswap_64(*addr);
245 return 0;
246}
247
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700248static char *do_read_string(struct feat_fd *ff)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200249{
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200250 u32 len;
251 char *buf;
252
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700253 if (do_read_u32(ff, &len))
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200254 return NULL;
255
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200256 buf = malloc(len);
257 if (!buf)
258 return NULL;
259
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700260 if (!__do_read(ff, buf, len)) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200261 /*
262 * strings are padded by zeroes
263 * thus the actual strlen of buf
264 * may be less than len
265 */
266 return buf;
267 }
268
269 free(buf);
270 return NULL;
271}
272
Jiri Olsae2091ce2018-03-07 16:50:08 +0100273/* Return: 0 if succeded, -ERR if failed. */
274static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
275{
276 unsigned long *set;
277 u64 size, *p;
278 int i, ret;
279
280 ret = do_read_u64(ff, &size);
281 if (ret)
282 return ret;
283
284 set = bitmap_alloc(size);
285 if (!set)
286 return -ENOMEM;
287
Jiri Olsae2091ce2018-03-07 16:50:08 +0100288 p = (u64 *) set;
289
290 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
291 ret = do_read_u64(ff, p + i);
292 if (ret < 0) {
293 free(set);
294 return ret;
295 }
296 }
297
298 *pset = set;
299 *psize = size;
300 return 0;
301}
302
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700303static int write_tracing_data(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200304 struct evlist *evlist)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200305{
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700306 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
307 return -1;
308
Jiri Olsace9036a2019-07-21 13:24:23 +0200309 return read_tracing_data(ff->fd, &evlist->core.entries);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200310}
311
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700312static int write_build_id(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200313 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200314{
315 struct perf_session *session;
316 int err;
317
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700318 session = container_of(ff->ph, struct perf_session, header);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200319
Robert Richtere20960c2011-12-07 10:02:55 +0100320 if (!perf_session__read_build_ids(session, true))
321 return -1;
322
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700323 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
324 return -1;
325
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700326 err = perf_session__write_buildid_table(session, ff);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200327 if (err < 0) {
328 pr_debug("failed to write buildid table\n");
329 return err;
330 }
Namhyung Kim73c5d222014-11-07 22:57:56 +0900331 perf_session__cache_build_ids(session);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200332
333 return 0;
334}
335
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700336static int write_hostname(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200337 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200338{
339 struct utsname uts;
340 int ret;
341
342 ret = uname(&uts);
343 if (ret < 0)
344 return -1;
345
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700346 return do_write_string(ff, uts.nodename);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200347}
348
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700349static int write_osrelease(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200350 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200351{
352 struct utsname uts;
353 int ret;
354
355 ret = uname(&uts);
356 if (ret < 0)
357 return -1;
358
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700359 return do_write_string(ff, uts.release);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200360}
361
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700362static int write_arch(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200363 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200364{
365 struct utsname uts;
366 int ret;
367
368 ret = uname(&uts);
369 if (ret < 0)
370 return -1;
371
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700372 return do_write_string(ff, uts.machine);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200373}
374
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700375static int write_version(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200376 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200377{
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700378 return do_write_string(ff, perf_version_string);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200379}
380
/*
 * Scan /proc/cpuinfo for a line starting with @cpuinfo_proc, strip the
 * "key:" prefix and trailing newline, collapse runs of whitespace in the
 * value (CPU branding strings pad with spaces), and write the result.
 */
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	/* ret becomes 0 on the first line matching the search prefix. */
	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	/* Skip past "key: " to the value, if present. */
	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);
			*p = ' ';
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
435
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700436static int write_cpudesc(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200437 struct evlist *evlist __maybe_unused)
Wang Nan493c3032014-10-24 09:45:26 +0800438{
Arnaldo Carvalho de Meloa7749402019-08-29 14:40:28 -0300439#if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
440#define CPUINFO_PROC { "cpu", }
441#elif defined(__s390__)
442#define CPUINFO_PROC { "vendor_id", }
443#elif defined(__sh__)
444#define CPUINFO_PROC { "cpu type", }
445#elif defined(__alpha__) || defined(__mips__)
446#define CPUINFO_PROC { "cpu model", }
447#elif defined(__arm__)
448#define CPUINFO_PROC { "model name", "Processor", }
449#elif defined(__arc__)
450#define CPUINFO_PROC { "Processor", }
451#elif defined(__xtensa__)
452#define CPUINFO_PROC { "core ID", }
453#else
454#define CPUINFO_PROC { "model name", }
455#endif
Wang Nan493c3032014-10-24 09:45:26 +0800456 const char *cpuinfo_procs[] = CPUINFO_PROC;
Arnaldo Carvalho de Meloa7749402019-08-29 14:40:28 -0300457#undef CPUINFO_PROC
Wang Nan493c3032014-10-24 09:45:26 +0800458 unsigned int i;
459
460 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
461 int ret;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700462 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
Wang Nan493c3032014-10-24 09:45:26 +0800463 if (ret >= 0)
464 return ret;
465 }
466 return -1;
467}
468
469
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700470static int write_nrcpus(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200471 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200472{
473 long nr;
474 u32 nrc, nra;
475 int ret;
476
Jan Stancekda8a58b2017-02-17 12:10:26 +0100477 nrc = cpu__max_present_cpu();
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200478
479 nr = sysconf(_SC_NPROCESSORS_ONLN);
480 if (nr < 0)
481 return -1;
482
483 nra = (u32)(nr & UINT_MAX);
484
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700485 ret = do_write(ff, &nrc, sizeof(nrc));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200486 if (ret < 0)
487 return ret;
488
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700489 return do_write(ff, &nra, sizeof(nra));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200490}
491
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700492static int write_event_desc(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200493 struct evlist *evlist)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200494{
Jiri Olsa32dcd022019-07-21 13:23:51 +0200495 struct evsel *evsel;
Namhyung Kim74ba9e12012-09-05 14:02:47 +0900496 u32 nre, nri, sz;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200497 int ret;
498
Jiri Olsa6484d2f2019-07-21 13:24:28 +0200499 nre = evlist->core.nr_entries;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200500
501 /*
502 * write number of events
503 */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700504 ret = do_write(ff, &nre, sizeof(nre));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200505 if (ret < 0)
506 return ret;
507
508 /*
509 * size of perf_event_attr struct
510 */
Jiri Olsa1fc632c2019-07-21 13:24:29 +0200511 sz = (u32)sizeof(evsel->core.attr);
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700512 ret = do_write(ff, &sz, sizeof(sz));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200513 if (ret < 0)
514 return ret;
515
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -0300516 evlist__for_each_entry(evlist, evsel) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +0200517 ret = do_write(ff, &evsel->core.attr, sz);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200518 if (ret < 0)
519 return ret;
520 /*
521 * write number of unique id per event
522 * there is one id per instance of an event
523 *
524 * copy into an nri to be independent of the
525 * type of ids,
526 */
Robert Richter6606f872012-08-16 21:10:19 +0200527 nri = evsel->ids;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700528 ret = do_write(ff, &nri, sizeof(nri));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200529 if (ret < 0)
530 return ret;
531
532 /*
533 * write event string as passed on cmdline
534 */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700535 ret = do_write_string(ff, perf_evsel__name(evsel));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200536 if (ret < 0)
537 return ret;
538 /*
539 * write unique ids for this event
540 */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700541 ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200542 if (ret < 0)
543 return ret;
544 }
545 return 0;
546}
547
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700548static int write_cmdline(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200549 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200550{
Andi Kleen94816ad2019-02-24 07:37:19 -0800551 char pbuf[MAXPATHLEN], *buf;
552 int i, ret, n;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200553
Tommi Rantala55f771282017-03-22 15:06:24 +0200554 /* actual path to perf binary */
Andi Kleen94816ad2019-02-24 07:37:19 -0800555 buf = perf_exe(pbuf, MAXPATHLEN);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200556
557 /* account for binary path */
Arnaldo Carvalho de Melob6998692015-09-08 16:58:20 -0300558 n = perf_env.nr_cmdline + 1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200559
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700560 ret = do_write(ff, &n, sizeof(n));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200561 if (ret < 0)
562 return ret;
563
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700564 ret = do_write_string(ff, buf);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200565 if (ret < 0)
566 return ret;
567
Arnaldo Carvalho de Melob6998692015-09-08 16:58:20 -0300568 for (i = 0 ; i < perf_env.nr_cmdline; i++) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700569 ret = do_write_string(ff, perf_env.cmdline_argv[i]);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200570 if (ret < 0)
571 return ret;
572 }
573 return 0;
574}
575
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200576
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700577static int write_cpu_topology(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200578 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200579{
Jiri Olsa5135d5e2019-02-19 10:58:13 +0100580 struct cpu_topology *tp;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200581 u32 i;
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300582 int ret, j;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200583
Jiri Olsa5135d5e2019-02-19 10:58:13 +0100584 tp = cpu_topology__new();
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200585 if (!tp)
586 return -1;
587
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700588 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200589 if (ret < 0)
590 goto done;
591
592 for (i = 0; i < tp->core_sib; i++) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700593 ret = do_write_string(ff, tp->core_siblings[i]);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200594 if (ret < 0)
595 goto done;
596 }
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700597 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200598 if (ret < 0)
599 goto done;
600
601 for (i = 0; i < tp->thread_sib; i++) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700602 ret = do_write_string(ff, tp->thread_siblings[i]);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200603 if (ret < 0)
604 break;
605 }
Kan Liang2bb00d22015-09-01 09:58:12 -0400606
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300607 ret = perf_env__read_cpu_topology_map(&perf_env);
608 if (ret < 0)
609 goto done;
610
611 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700612 ret = do_write(ff, &perf_env.cpu[j].core_id,
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300613 sizeof(perf_env.cpu[j].core_id));
Kan Liang2bb00d22015-09-01 09:58:12 -0400614 if (ret < 0)
615 return ret;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700616 ret = do_write(ff, &perf_env.cpu[j].socket_id,
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300617 sizeof(perf_env.cpu[j].socket_id));
Kan Liang2bb00d22015-09-01 09:58:12 -0400618 if (ret < 0)
619 return ret;
620 }
Kan Liangacae8b32019-06-04 15:50:41 -0700621
622 if (!tp->die_sib)
623 goto done;
624
625 ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
626 if (ret < 0)
627 goto done;
628
629 for (i = 0; i < tp->die_sib; i++) {
630 ret = do_write_string(ff, tp->die_siblings[i]);
631 if (ret < 0)
632 goto done;
633 }
634
635 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
636 ret = do_write(ff, &perf_env.cpu[j].die_id,
637 sizeof(perf_env.cpu[j].die_id));
638 if (ret < 0)
639 return ret;
640 }
641
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200642done:
Jiri Olsa5135d5e2019-02-19 10:58:13 +0100643 cpu_topology__delete(tp);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200644 return ret;
645}
646
647
648
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700649static int write_total_mem(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200650 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200651{
652 char *buf = NULL;
653 FILE *fp;
654 size_t len = 0;
655 int ret = -1, n;
656 uint64_t mem;
657
658 fp = fopen("/proc/meminfo", "r");
659 if (!fp)
660 return -1;
661
662 while (getline(&buf, &len, fp) > 0) {
663 ret = strncmp(buf, "MemTotal:", 9);
664 if (!ret)
665 break;
666 }
667 if (!ret) {
668 n = sscanf(buf, "%*s %"PRIu64, &mem);
669 if (n == 1)
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700670 ret = do_write(ff, &mem, sizeof(mem));
Wang Naned307752014-10-16 11:08:29 +0800671 } else
672 ret = -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200673 free(buf);
674 fclose(fp);
675 return ret;
676}
677
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700678static int write_numa_topology(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200679 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200680{
Jiri Olsa48e6c5a2019-02-19 10:58:14 +0100681 struct numa_topology *tp;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200682 int ret = -1;
Jiri Olsa48e6c5a2019-02-19 10:58:14 +0100683 u32 i;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200684
Jiri Olsa48e6c5a2019-02-19 10:58:14 +0100685 tp = numa_topology__new();
686 if (!tp)
687 return -ENOMEM;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200688
Jiri Olsa48e6c5a2019-02-19 10:58:14 +0100689 ret = do_write(ff, &tp->nr, sizeof(u32));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200690 if (ret < 0)
Jiri Olsa48e6c5a2019-02-19 10:58:14 +0100691 goto err;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200692
Jiri Olsa48e6c5a2019-02-19 10:58:14 +0100693 for (i = 0; i < tp->nr; i++) {
694 struct numa_topology_node *n = &tp->nodes[i];
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200695
Jiri Olsa48e6c5a2019-02-19 10:58:14 +0100696 ret = do_write(ff, &n->node, sizeof(u32));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200697 if (ret < 0)
Jiri Olsa48e6c5a2019-02-19 10:58:14 +0100698 goto err;
699
700 ret = do_write(ff, &n->mem_total, sizeof(u64));
701 if (ret)
702 goto err;
703
704 ret = do_write(ff, &n->mem_free, sizeof(u64));
705 if (ret)
706 goto err;
707
708 ret = do_write_string(ff, n->cpus);
709 if (ret < 0)
710 goto err;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200711 }
Jiri Olsa48e6c5a2019-02-19 10:58:14 +0100712
713 ret = 0;
714
715err:
716 numa_topology__delete(tp);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200717 return ret;
718}
719
720/*
Robert Richter50a96672012-08-16 21:10:24 +0200721 * File format:
722 *
723 * struct pmu_mappings {
724 * u32 pmu_num;
725 * struct pmu_map {
726 * u32 type;
727 * char name[];
728 * }[pmu_num];
729 * };
730 */
731
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700732static int write_pmu_mappings(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200733 struct evlist *evlist __maybe_unused)
Robert Richter50a96672012-08-16 21:10:24 +0200734{
735 struct perf_pmu *pmu = NULL;
David Carrillo-Cisnerosa02c3952017-07-17 21:25:44 -0700736 u32 pmu_num = 0;
Namhyung Kim5323f602012-12-17 15:38:54 +0900737 int ret;
Robert Richter50a96672012-08-16 21:10:24 +0200738
David Carrillo-Cisnerosa02c3952017-07-17 21:25:44 -0700739 /*
740 * Do a first pass to count number of pmu to avoid lseek so this
741 * works in pipe mode as well.
742 */
743 while ((pmu = perf_pmu__scan(pmu))) {
744 if (!pmu->name)
745 continue;
746 pmu_num++;
747 }
748
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700749 ret = do_write(ff, &pmu_num, sizeof(pmu_num));
Namhyung Kim5323f602012-12-17 15:38:54 +0900750 if (ret < 0)
751 return ret;
Robert Richter50a96672012-08-16 21:10:24 +0200752
753 while ((pmu = perf_pmu__scan(pmu))) {
754 if (!pmu->name)
755 continue;
Namhyung Kim5323f602012-12-17 15:38:54 +0900756
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700757 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
Namhyung Kim5323f602012-12-17 15:38:54 +0900758 if (ret < 0)
759 return ret;
760
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700761 ret = do_write_string(ff, pmu->name);
Namhyung Kim5323f602012-12-17 15:38:54 +0900762 if (ret < 0)
763 return ret;
Robert Richter50a96672012-08-16 21:10:24 +0200764 }
765
Robert Richter50a96672012-08-16 21:10:24 +0200766 return 0;
767}
768
769/*
Namhyung Kima8bb5592013-01-22 18:09:31 +0900770 * File format:
771 *
772 * struct group_descs {
773 * u32 nr_groups;
774 * struct group_desc {
775 * char name[];
776 * u32 leader_idx;
777 * u32 nr_members;
778 * }[nr_groups];
779 * };
780 */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700781static int write_group_desc(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200782 struct evlist *evlist)
Namhyung Kima8bb5592013-01-22 18:09:31 +0900783{
784 u32 nr_groups = evlist->nr_groups;
Jiri Olsa32dcd022019-07-21 13:23:51 +0200785 struct evsel *evsel;
Namhyung Kima8bb5592013-01-22 18:09:31 +0900786 int ret;
787
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700788 ret = do_write(ff, &nr_groups, sizeof(nr_groups));
Namhyung Kima8bb5592013-01-22 18:09:31 +0900789 if (ret < 0)
790 return ret;
791
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -0300792 evlist__for_each_entry(evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +0900793 if (perf_evsel__is_group_leader(evsel) &&
Jiri Olsa5643b1a2019-07-21 13:24:46 +0200794 evsel->core.nr_members > 1) {
Namhyung Kima8bb5592013-01-22 18:09:31 +0900795 const char *name = evsel->group_name ?: "{anon_group}";
796 u32 leader_idx = evsel->idx;
Jiri Olsa5643b1a2019-07-21 13:24:46 +0200797 u32 nr_members = evsel->core.nr_members;
Namhyung Kima8bb5592013-01-22 18:09:31 +0900798
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700799 ret = do_write_string(ff, name);
Namhyung Kima8bb5592013-01-22 18:09:31 +0900800 if (ret < 0)
801 return ret;
802
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700803 ret = do_write(ff, &leader_idx, sizeof(leader_idx));
Namhyung Kima8bb5592013-01-22 18:09:31 +0900804 if (ret < 0)
805 return ret;
806
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700807 ret = do_write(ff, &nr_members, sizeof(nr_members));
Namhyung Kima8bb5592013-01-22 18:09:31 +0900808 if (ret < 0)
809 return ret;
810 }
811 }
812 return 0;
813}
814
/*
 * Return the CPU id as a raw string, or NULL by default.
 *
 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile". This weak
 * default is overridden by arch/$(SRCARCH)/util/header.c.
 */
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
	return NULL;
}
825
826/* Return zero when the cpuid from the mapfile.csv matches the
827 * cpuid string generated on this platform.
828 * Otherwise return non-zero.
829 */
830int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
831{
832 regex_t re;
833 regmatch_t pmatch[1];
834 int match;
835
836 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
837 /* Warn unable to generate match particular string. */
838 pr_info("Invalid regular expression %s\n", mapcpuid);
839 return 1;
840 }
841
842 match = !regexec(&re, cpuid, 1, pmatch, 0);
843 regfree(&re);
844 if (match) {
845 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
846
847 /* Verify the entire string matched. */
848 if (match_len == strlen(cpuid))
849 return 0;
850 }
851 return 1;
852}
853
/*
 * default get_cpuid(): nothing gets recorded (returns -1 so the
 * CPUID feature is skipped); the actual implementation must be in
 * arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}
862
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700863static int write_cpuid(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200864 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200865{
866 char buffer[64];
867 int ret;
868
869 ret = get_cpuid(buffer, sizeof(buffer));
Jiri Olsaa9aeb872019-02-13 13:32:43 +0100870 if (ret)
871 return -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200872
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700873 return do_write_string(ff, buffer);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200874}
875
/*
 * BRANCH_STACK is a flag-only feature: its presence in the header is
 * the information, so there is no payload to write.
 */
static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	return 0;
}
881
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700882static int write_auxtrace(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200883 struct evlist *evlist __maybe_unused)
Adrian Hunter4025ea42015-04-09 18:53:41 +0300884{
Adrian Hunter99fa2982015-04-30 17:37:25 +0300885 struct perf_session *session;
886 int err;
887
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700888 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
889 return -1;
890
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700891 session = container_of(ff->ph, struct perf_session, header);
Adrian Hunter99fa2982015-04-30 17:37:25 +0300892
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700893 err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
Adrian Hunter99fa2982015-04-30 17:37:25 +0300894 if (err < 0)
895 pr_err("Failed to write auxtrace index\n");
896 return err;
Adrian Hunter4025ea42015-04-09 18:53:41 +0300897}
898
Alexey Budankovcf790512018-10-09 17:36:24 +0300899static int write_clockid(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200900 struct evlist *evlist __maybe_unused)
Alexey Budankovcf790512018-10-09 17:36:24 +0300901{
902 return do_write(ff, &ff->ph->env.clockid_res_ns,
903 sizeof(ff->ph->env.clockid_res_ns));
904}
905
Jiri Olsa258031c2019-03-08 14:47:39 +0100906static int write_dir_format(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200907 struct evlist *evlist __maybe_unused)
Jiri Olsa258031c2019-03-08 14:47:39 +0100908{
909 struct perf_session *session;
910 struct perf_data *data;
911
912 session = container_of(ff->ph, struct perf_session, header);
913 data = session->data;
914
915 if (WARN_ON(!perf_data__is_dir(data)))
916 return -1;
917
918 return do_write(ff, &data->dir.version, sizeof(data->dir.version));
919}
920
#ifdef HAVE_LIBBPF_SUPPORT
/*
 * Write the collected BPF program infos: a count followed by each
 * bpf_prog_info_linear blob, walked in rb-tree order under the
 * bpf_progs read lock.
 */
static int write_bpf_prog_info(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.infos_cnt,
		       sizeof(env->bpf_progs.infos_cnt));
	if (ret < 0)
		goto out;

	root = &env->bpf_progs.infos;
	next = rb_first(root);
	while (next) {
		struct bpf_prog_info_node *node;
		size_t len;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		/* header struct plus the variable-length data that follows it */
		len = sizeof(struct bpf_prog_info_linear) +
			node->info_linear->data_len;

		/* before writing to file, translate address to offset */
		bpf_program__bpil_addr_to_offs(node->info_linear);
		ret = do_write(ff, node->info_linear, len);
		/*
		 * translate back to address even when do_write() fails,
		 * so that this function never changes the data.
		 */
		bpf_program__bpil_offs_to_addr(node->info_linear);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}
#else // HAVE_LIBBPF_SUPPORT
/* Without libbpf there is nothing to record; write an empty feature. */
static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
			       struct evlist *evlist __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
970
/*
 * Write the collected BTF blobs: a count followed by each btf_node's
 * (id, data_size, data) triple, walked in rb-tree order under the
 * bpf_progs read lock.
 */
static int write_bpf_btf(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.btfs_cnt,
		       sizeof(env->bpf_progs.btfs_cnt));

	if (ret < 0)
		goto out;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);
	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		/*
		 * Writes the two u32 fields (id, data_size) plus the raw BTF
		 * data that follows them, in one contiguous do_write().
		 */
		ret = do_write(ff, &node->id,
			       sizeof(u32) * 2 + node->data_size);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}
1003
Jiri Olsa720e98b2016-02-16 16:01:43 +01001004static int cpu_cache_level__sort(const void *a, const void *b)
1005{
1006 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
1007 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
1008
1009 return cache_a->level - cache_b->level;
1010}
1011
1012static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
1013{
1014 if (a->level != b->level)
1015 return false;
1016
1017 if (a->line_size != b->line_size)
1018 return false;
1019
1020 if (a->sets != b->sets)
1021 return false;
1022
1023 if (a->ways != b->ways)
1024 return false;
1025
1026 if (strcmp(a->type, b->type))
1027 return false;
1028
1029 if (strcmp(a->size, b->size))
1030 return false;
1031
1032 if (strcmp(a->map, b->map))
1033 return false;
1034
1035 return true;
1036}
1037
/*
 * Read one cache level description for @cpu from
 * /sys/devices/system/cpu/cpu<cpu>/cache/index<level>/.
 *
 * Returns 0 on success (all string fields heap-allocated and trimmed),
 * 1 when the index directory does not exist (no such level — not an
 * error), and -1 on a read failure (any strings read so far are freed).
 */
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	/* path is sysfs-relative; file is the absolute form for stat(). */
	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	/* sysfs__read_*() take sysfs-relative paths from here on. */
	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	/* Terminate and strip the trailing newline sysfs includes. */
	cache->type[len] = 0;
	cache->type = strim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		zfree(&cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = strim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		zfree(&cache->map);
		zfree(&cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = strim(cache->map);
	return 0;
}
1093
/* Print one cache entry, e.g. "L1 Data  32K [0-1]". */
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
1098
/*
 * Collect the de-duplicated set of cache descriptions across all CPUs
 * into @caches (capacity @size); the count is returned via @cntp.
 * Returns 0 on success, negative on read error.
 *
 * NOTE(review): on an error return the entries already stored in
 * @caches are not freed and *cntp is not updated — the caller cannot
 * release them; confirm whether this leak is acceptable here.
 */
static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		/* Probe index0..index9; stop at the first missing level. */
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			/* err == 1: no such cache level on this cpu. */
			if (err == 1)
				break;

			/* Keep only one copy of each distinct cache. */
			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}
1142
/* Upper bound on distinct cache levels probed per CPU. */
#define MAX_CACHE_LVL 4

/*
 * Write the CACHE feature: a version, a count, then for each distinct
 * cache the four u32 geometry fields followed by the three strings
 * (type, size, map). All collected entries are freed before returning.
 */
static int write_cache(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL;
	/* VLA sized by possible cpus; entries are filled by build_caches(). */
	struct cpu_cache_level caches[max_caches];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, max_caches, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		/* Emit the u32 members in a fixed order. */
	#define _W(v) \
		ret = do_write(ff, &c->v, sizeof(u32)); \
		if (ret < 0) \
			goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
	#undef _W

		/* Then the string members, as length-prefixed strings. */
	#define _W(v) \
		ret = do_write_string(ff, (const char *) c->v); \
		if (ret < 0) \
			goto out;

		_W(type)
		_W(size)
		_W(map)
	#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}
1197
/*
 * STAT is a flag-only feature: its presence marks a perf-stat data
 * file, so there is no payload to write.
 */
static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct evlist *evlist __maybe_unused)
{
	return 0;
}
1203
Jin Yao60115182017-12-08 21:13:41 +08001204static int write_sample_time(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +02001205 struct evlist *evlist)
Jin Yao60115182017-12-08 21:13:41 +08001206{
1207 int ret;
1208
1209 ret = do_write(ff, &evlist->first_sample_time,
1210 sizeof(evlist->first_sample_time));
1211 if (ret < 0)
1212 return ret;
1213
1214 return do_write(ff, &evlist->last_sample_time,
1215 sizeof(evlist->last_sample_time));
1216}
1217
Jiri Olsae2091ce2018-03-07 16:50:08 +01001218
1219static int memory_node__read(struct memory_node *n, unsigned long idx)
1220{
1221 unsigned int phys, size = 0;
1222 char path[PATH_MAX];
1223 struct dirent *ent;
1224 DIR *dir;
1225
1226#define for_each_memory(mem, dir) \
1227 while ((ent = readdir(dir))) \
1228 if (strcmp(ent->d_name, ".") && \
1229 strcmp(ent->d_name, "..") && \
1230 sscanf(ent->d_name, "memory%u", &mem) == 1)
1231
1232 scnprintf(path, PATH_MAX,
1233 "%s/devices/system/node/node%lu",
1234 sysfs__mountpoint(), idx);
1235
1236 dir = opendir(path);
1237 if (!dir) {
1238 pr_warning("failed: cant' open memory sysfs data\n");
1239 return -1;
1240 }
1241
1242 for_each_memory(phys, dir) {
1243 size = max(phys, size);
1244 }
1245
1246 size++;
1247
1248 n->set = bitmap_alloc(size);
1249 if (!n->set) {
1250 closedir(dir);
1251 return -ENOMEM;
1252 }
1253
Jiri Olsae2091ce2018-03-07 16:50:08 +01001254 n->node = idx;
1255 n->size = size;
1256
1257 rewinddir(dir);
1258
1259 for_each_memory(phys, dir) {
1260 set_bit(phys, n->set);
1261 }
1262
1263 closedir(dir);
1264 return 0;
1265}
1266
1267static int memory_node__sort(const void *a, const void *b)
1268{
1269 const struct memory_node *na = a;
1270 const struct memory_node *nb = b;
1271
1272 return na->node - nb->node;
1273}
1274
1275static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1276{
1277 char path[PATH_MAX];
1278 struct dirent *ent;
1279 DIR *dir;
1280 u64 cnt = 0;
1281 int ret = 0;
1282
1283 scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1284 sysfs__mountpoint());
1285
1286 dir = opendir(path);
1287 if (!dir) {
Thomas Richter4f75f1cb2018-04-12 15:32:46 +02001288 pr_debug2("%s: could't read %s, does this arch have topology information?\n",
1289 __func__, path);
Jiri Olsae2091ce2018-03-07 16:50:08 +01001290 return -1;
1291 }
1292
1293 while (!ret && (ent = readdir(dir))) {
1294 unsigned int idx;
1295 int r;
1296
1297 if (!strcmp(ent->d_name, ".") ||
1298 !strcmp(ent->d_name, ".."))
1299 continue;
1300
1301 r = sscanf(ent->d_name, "node%u", &idx);
1302 if (r != 1)
1303 continue;
1304
1305 if (WARN_ONCE(cnt >= size,
1306 "failed to write MEM_TOPOLOGY, way too many nodes\n"))
1307 return -1;
1308
1309 ret = memory_node__read(&nodes[cnt++], idx);
1310 }
1311
1312 *cntp = cnt;
1313 closedir(dir);
1314
1315 if (!ret)
1316 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1317
1318 return ret;
1319}
1320
/* Upper bound on NUMA nodes the static nodes[] table can hold. */
#define MAX_MEMORY_NODES 2000

/*
 * The MEM_TOPOLOGY feature holds the physical memory map for every
 * node in the system. The format of data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store map of physical indexes for
 * each node:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belongs to node
 */
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	/* static: ~2000 entries would be too large for the stack */
	static struct memory_node nodes[MAX_MEMORY_NODES];
	u64 bsize, version = 1, i, nr;
	int ret;

	/* Memory block size in bytes, from sysfs. */
	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
	if (ret)
		return ret;

	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

		/* Per-node fixed fields, then the variable-length bitmap. */
	#define _W(v) \
		ret = do_write(ff, &n->v, sizeof(n->v)); \
		if (ret < 0) \
			goto out;

		_W(node)
		_W(size)

	#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
1387
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001388static int write_compressed(struct feat_fd *ff __maybe_unused,
Jiri Olsa63503db2019-07-21 13:23:52 +02001389 struct evlist *evlist __maybe_unused)
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001390{
1391 int ret;
1392
1393 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
1394 if (ret)
1395 return ret;
1396
1397 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
1398 if (ret)
1399 return ret;
1400
1401 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
1402 if (ret)
1403 return ret;
1404
1405 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
1406 if (ret)
1407 return ret;
1408
1409 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
1410}
1411
/* Print the recorded hostname from the header env. */
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}
1416
/* Print the recorded OS release string from the header env. */
static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}
1421
/* Print the recorded machine architecture from the header env. */
static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}
1426
/* Print the recorded CPU description from the header env. */
static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}
1431
/* Print the recorded online and available CPU counts. */
static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}
1437
/* Print the perf version that produced the recording. */
static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}
1442
/*
 * Print the recorded command line, escaping embedded single quotes as
 * \' in each argument. If duplicating an argument fails, it is printed
 * unescaped as a best effort.
 */
static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		/* Work on a copy: the escaping loop mutates the string. */
		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
		if (!argv_i) {
			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
		} else {
			char *mem = argv_i;
			do {
				char *quote = strchr(argv_i, '\'');
				if (!quote)
					break;
				/* Split at the quote and emit "<prefix>\'". */
				*quote++ = '\0';
				fprintf(fp, "%s\\\'", argv_i);
				argv_i = quote;
			} while (1);
			fprintf(fp, "%s ", argv_i);
			free(mem);
		}
	}
	fputc('\n', fp);
}
1471
/*
 * Print the recorded CPU topology: sibling socket/die/thread maps
 * (stored as consecutive NUL-terminated strings) and the per-CPU
 * core/die/socket ids. Die information is only present when
 * nr_sibling_dies is non-zero.
 */
static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling sockets : %s\n", str);
		/* Advance past the current NUL-terminated entry. */
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		nr = ph->env.nr_sibling_dies;
		str = ph->env.sibling_dies;

		for (i = 0; i < nr; i++) {
			fprintf(fp, "# sibling dies : %s\n", str);
			str += strlen(str) + 1;
		}
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Die ID %d, Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].die_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID, Die ID and Socket ID "
				    "information is not available\n");
	} else {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID and Socket ID "
				    "information is not available\n");
	}
}
1528
static void print_clockid(struct feat_fd *ff, FILE *fp)
{
	/*
	 * NOTE(review): the label says "frequency ... MHz" but the value
	 * printed is clockid_res_ns * 1000 (the recorded clock resolution
	 * in ns, scaled by 1000) — the units look inconsistent; confirm
	 * intent before changing the output format.
	 */
	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
		ff->ph->env.clockid_res_ns * 1000);
}
1534
Jiri Olsa258031c2019-03-08 14:47:39 +01001535static void print_dir_format(struct feat_fd *ff, FILE *fp)
1536{
1537 struct perf_session *session;
1538 struct perf_data *data;
1539
1540 session = container_of(ff->ph, struct perf_session, header);
1541 data = session->data;
1542
1543 fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
1544}
1545
/*
 * Print every recorded BPF program info, walking the rb-tree in order
 * under the bpf_progs read lock.
 */
static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);

		bpf_event__print_bpf_prog_info(&node->info_linear->info,
					       env, fp);
	}

	up_read(&env->bpf_progs.lock);
}
1569
/*
 * Print the id of every recorded BTF blob, walking the rb-tree in
 * order under the bpf_progs read lock.
 */
static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		fprintf(fp, "# btf info of id %u\n", node->id);
	}

	up_read(&env->bpf_progs.lock);
}
1591
Jiri Olsa32dcd022019-07-21 13:23:51 +02001592static void free_event_desc(struct evsel *events)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001593{
Jiri Olsa32dcd022019-07-21 13:23:51 +02001594 struct evsel *evsel;
Robert Richter4e1b9c62012-08-16 21:10:22 +02001595
1596 if (!events)
1597 return;
1598
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001599 for (evsel = events; evsel->core.attr.size; evsel++) {
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -03001600 zfree(&evsel->name);
1601 zfree(&evsel->id);
Robert Richter4e1b9c62012-08-16 21:10:22 +02001602 }
1603
1604 free(events);
1605}
1606
/*
 * Read the EVENT_DESC feature payload: an array of events, each with
 * its on-file perf_event_attr, name and sample ids. Returns a
 * calloc'd array terminated by an entry with core.attr.size == 0, or
 * NULL on any read/allocation failure (partial results are freed).
 */
static struct evsel *read_event_desc(struct feat_fd *ff)
{
	struct evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	/* number of events */
	if (do_read_u32(ff, &nre))
		goto error;

	/* on-file size of each attr struct (may differ from ours) */
	if (do_read_u32(ff, &sz))
		goto error;

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->core.attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	/* copy at most the smaller of the on-file and in-memory sizes */
	msz = sizeof(evsel->core.attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->core.attr, buf, msz);

		/* number of sample ids recorded for this event */
		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	/* frees per-event names/ids and the array, then returns NULL */
	free_event_desc(events);
	events = NULL;
	goto out;
}
1684
/*
 * perf_event_attr__fprintf() callback: print one attr field as
 * ", name = val". Returns the number of characters written.
 */
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}
1690
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001691static void print_event_desc(struct feat_fd *ff, FILE *fp)
Robert Richter4e1b9c62012-08-16 21:10:22 +02001692{
Jiri Olsa32dcd022019-07-21 13:23:51 +02001693 struct evsel *evsel, *events;
Robert Richter4e1b9c62012-08-16 21:10:22 +02001694 u32 j;
1695 u64 *id;
1696
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07001697 if (ff->events)
1698 events = ff->events;
1699 else
1700 events = read_event_desc(ff);
1701
Robert Richter4e1b9c62012-08-16 21:10:22 +02001702 if (!events) {
1703 fprintf(fp, "# event desc: not available or unable to read\n");
1704 return;
1705 }
1706
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001707 for (evsel = events; evsel->core.attr.size; evsel++) {
Robert Richter4e1b9c62012-08-16 21:10:22 +02001708 fprintf(fp, "# event : name = %s, ", evsel->name);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001709
Robert Richter4e1b9c62012-08-16 21:10:22 +02001710 if (evsel->ids) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001711 fprintf(fp, ", id = {");
Robert Richter4e1b9c62012-08-16 21:10:22 +02001712 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1713 if (j)
1714 fputc(',', fp);
1715 fprintf(fp, " %"PRIu64, *id);
1716 }
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001717 fprintf(fp, " }");
Robert Richter4e1b9c62012-08-16 21:10:22 +02001718 }
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001719
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001720 perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);
Robert Richter4e1b9c62012-08-16 21:10:22 +02001721
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001722 fputc('\n', fp);
1723 }
Robert Richter4e1b9c62012-08-16 21:10:22 +02001724
1725 free_event_desc(events);
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07001726 ff->events = NULL;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001727}
1728
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001729static void print_total_mem(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001730{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001731 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001732}
1733
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001734static void print_numa_topology(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001735{
Jiri Olsac60da222016-07-04 14:16:20 +02001736 int i;
1737 struct numa_node *n;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001738
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001739 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1740 n = &ff->ph->env.numa_nodes[i];
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001741
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001742 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1743 " free = %"PRIu64" kB\n",
Jiri Olsac60da222016-07-04 14:16:20 +02001744 n->node, n->mem_total, n->mem_free);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001745
Jiri Olsac60da222016-07-04 14:16:20 +02001746 fprintf(fp, "# node%u cpu list : ", n->node);
1747 cpu_map__fprintf(n->map, fp);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001748 }
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001749}
1750
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001751static void print_cpuid(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001752{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001753 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001754}
1755
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001756static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
Stephane Eranian330aa672012-03-08 23:47:46 +01001757{
1758 fprintf(fp, "# contains samples with branch stack\n");
1759}
1760
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001761static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
Adrian Hunter4025ea42015-04-09 18:53:41 +03001762{
1763 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1764}
1765
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001766static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
Jiri Olsaffa517a2015-10-25 15:51:43 +01001767{
1768 fprintf(fp, "# contains stat data\n");
1769}
1770
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001771static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
Jiri Olsa720e98b2016-02-16 16:01:43 +01001772{
1773 int i;
1774
1775 fprintf(fp, "# CPU cache info:\n");
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001776 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
Jiri Olsa720e98b2016-02-16 16:01:43 +01001777 fprintf(fp, "# ");
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001778 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
Jiri Olsa720e98b2016-02-16 16:01:43 +01001779 }
1780}
1781
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001782static void print_compressed(struct feat_fd *ff, FILE *fp)
1783{
1784 fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
1785 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
1786 ff->ph->env.comp_level, ff->ph->env.comp_ratio);
1787}
1788
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001789static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
Robert Richter50a96672012-08-16 21:10:24 +02001790{
1791 const char *delimiter = "# pmu mappings: ";
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001792 char *str, *tmp;
Robert Richter50a96672012-08-16 21:10:24 +02001793 u32 pmu_num;
1794 u32 type;
1795
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001796 pmu_num = ff->ph->env.nr_pmu_mappings;
Robert Richter50a96672012-08-16 21:10:24 +02001797 if (!pmu_num) {
1798 fprintf(fp, "# pmu mappings: not available\n");
1799 return;
1800 }
1801
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001802 str = ff->ph->env.pmu_mappings;
Namhyung Kimbe4a2de2012-09-05 14:02:49 +09001803
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001804 while (pmu_num) {
1805 type = strtoul(str, &tmp, 0);
1806 if (*tmp != ':')
1807 goto error;
1808
1809 str = tmp + 1;
1810 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1811
Robert Richter50a96672012-08-16 21:10:24 +02001812 delimiter = ", ";
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001813 str += strlen(str) + 1;
1814 pmu_num--;
Robert Richter50a96672012-08-16 21:10:24 +02001815 }
1816
1817 fprintf(fp, "\n");
1818
1819 if (!pmu_num)
1820 return;
1821error:
1822 fprintf(fp, "# pmu mappings: unable to read\n");
1823}
1824
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001825static void print_group_desc(struct feat_fd *ff, FILE *fp)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001826{
1827 struct perf_session *session;
Jiri Olsa32dcd022019-07-21 13:23:51 +02001828 struct evsel *evsel;
Namhyung Kima8bb5592013-01-22 18:09:31 +09001829 u32 nr = 0;
1830
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001831 session = container_of(ff->ph, struct perf_session, header);
Namhyung Kima8bb5592013-01-22 18:09:31 +09001832
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001833 evlist__for_each_entry(session->evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09001834 if (perf_evsel__is_group_leader(evsel) &&
Jiri Olsa5643b1a2019-07-21 13:24:46 +02001835 evsel->core.nr_members > 1) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09001836 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1837 perf_evsel__name(evsel));
1838
Jiri Olsa5643b1a2019-07-21 13:24:46 +02001839 nr = evsel->core.nr_members - 1;
Namhyung Kima8bb5592013-01-22 18:09:31 +09001840 } else if (nr) {
1841 fprintf(fp, ",%s", perf_evsel__name(evsel));
1842
1843 if (--nr == 0)
1844 fprintf(fp, "}\n");
1845 }
1846 }
1847}
1848
Jin Yao60115182017-12-08 21:13:41 +08001849static void print_sample_time(struct feat_fd *ff, FILE *fp)
1850{
1851 struct perf_session *session;
1852 char time_buf[32];
1853 double d;
1854
1855 session = container_of(ff->ph, struct perf_session, header);
1856
1857 timestamp__scnprintf_usec(session->evlist->first_sample_time,
1858 time_buf, sizeof(time_buf));
1859 fprintf(fp, "# time of first sample : %s\n", time_buf);
1860
1861 timestamp__scnprintf_usec(session->evlist->last_sample_time,
1862 time_buf, sizeof(time_buf));
1863 fprintf(fp, "# time of last sample : %s\n", time_buf);
1864
1865 d = (double)(session->evlist->last_sample_time -
1866 session->evlist->first_sample_time) / NSEC_PER_MSEC;
1867
1868 fprintf(fp, "# sample duration : %10.3f ms\n", d);
1869}
1870
Jiri Olsae2091ce2018-03-07 16:50:08 +01001871static void memory_node__fprintf(struct memory_node *n,
1872 unsigned long long bsize, FILE *fp)
1873{
1874 char buf_map[100], buf_size[50];
1875 unsigned long long size;
1876
1877 size = bsize * bitmap_weight(n->set, n->size);
1878 unit_number__scnprintf(buf_size, 50, size);
1879
1880 bitmap_scnprintf(n->set, n->size, buf_map, 100);
1881 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
1882}
1883
1884static void print_mem_topology(struct feat_fd *ff, FILE *fp)
1885{
1886 struct memory_node *nodes;
1887 int i, nr;
1888
1889 nodes = ff->ph->env.memory_nodes;
1890 nr = ff->ph->env.nr_memory_nodes;
1891
1892 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
1893 nr, ff->ph->env.memory_bsize);
1894
1895 for (i = 0; i < nr; i++) {
1896 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
1897 }
1898}
1899
/*
 * Attach the build id carried in @bev to the DSO named @filename on the
 * machine identified by bev->pid (host or guest), creating the machine
 * and DSO entries on demand.
 *
 * Returns 0 on success, -1 if the machine can't be found/created or the
 * event's cpumode doesn't map to a known DSO type.
 */
static int __event_process_build_id(struct perf_record_header_build_id *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	/* bev->pid selects the host machine or a specific guest. */
	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	/* Map the record's cpumode to the kind of DSO this build id is for. */
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			/*
			 * Kernel modules get their module info set;
			 * any other kernel-space DSO is just tagged
			 * with the dso_type derived above.
			 */
			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);	/* drop the reference taken by findnew */
	}

	err = 0;
out:
	return err;
}
1959
/*
 * Read a build-id table written before the a1645ce1 changeset added the
 * pid field to struct perf_record_header_build_id.  The on-disk records
 * lack that leading pid, so read each one into a matching "old" layout
 * and synthesize the pid from header.misc before handing the record to
 * __event_process_build_id().
 */
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		/*
		 * The DSO file name follows the fixed-size record.
		 * NOTE(review): len comes straight from the file; a corrupt
		 * header.size could exceed PATH_MAX here - verify against
		 * the writer's guarantees.
		 */
		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value give us nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}
2005
/*
 * Read the HEADER_BUILD_ID table: a sequence of
 * struct perf_record_header_build_id records, each followed by the DSO
 * file name, occupying @size bytes at @offset in @input.  Falls back to
 * the pre-a1645ce1 layout when the old format is detected.
 */
static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		/* The DSO file name follows the fixed-size record. */
		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct perf_record_header_build_id that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			/* Rewind and re-parse the whole table with the old layout. */
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
2054
/*
 * Macro for features that simply need to read and store a string.
 * Expands to process_<__feat>() which reads one string from the feature
 * section into perf_env::<__feat_env>, returning 0 or -ENOMEM.
 */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
	ff->ph->env.__feat_env = do_read_string(ff); \
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}

/* One trivial string-feature reader for each of these header features. */
FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2069
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002070static int process_tracing_data(struct feat_fd *ff, void *data)
Robert Richterf1c67db2012-02-10 15:41:56 +01002071{
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002072 ssize_t ret = trace_report(ff->fd, data, false);
2073
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09002074 return ret < 0 ? -1 : 0;
Robert Richterf1c67db2012-02-10 15:41:56 +01002075}
2076
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002077static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
Robert Richterf1c67db2012-02-10 15:41:56 +01002078{
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002079 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
Robert Richterf1c67db2012-02-10 15:41:56 +01002080 pr_debug("Failed to read buildids, continuing...\n");
2081 return 0;
2082}
2083
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002084static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002085{
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002086 int ret;
2087 u32 nr_cpus_avail, nr_cpus_online;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002088
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002089 ret = do_read_u32(ff, &nr_cpus_avail);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002090 if (ret)
2091 return ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002092
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002093 ret = do_read_u32(ff, &nr_cpus_online);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002094 if (ret)
2095 return ret;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002096 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2097 ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002098 return 0;
2099}
2100
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002101static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002102{
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002103 u64 total_mem;
2104 int ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002105
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002106 ret = do_read_u64(ff, &total_mem);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002107 if (ret)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002108 return -1;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002109 ff->ph->env.total_mem = (unsigned long long)total_mem;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002110 return 0;
2111}
2112
Jiri Olsa32dcd022019-07-21 13:23:51 +02002113static struct evsel *
Jiri Olsa63503db2019-07-21 13:23:52 +02002114perf_evlist__find_by_index(struct evlist *evlist, int idx)
Robert Richter7c2f7af2012-08-16 21:10:23 +02002115{
Jiri Olsa32dcd022019-07-21 13:23:51 +02002116 struct evsel *evsel;
Robert Richter7c2f7af2012-08-16 21:10:23 +02002117
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002118 evlist__for_each_entry(evlist, evsel) {
Robert Richter7c2f7af2012-08-16 21:10:23 +02002119 if (evsel->idx == idx)
2120 return evsel;
2121 }
2122
2123 return NULL;
2124}
2125
2126static void
Jiri Olsa63503db2019-07-21 13:23:52 +02002127perf_evlist__set_event_name(struct evlist *evlist,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002128 struct evsel *event)
Robert Richter7c2f7af2012-08-16 21:10:23 +02002129{
Jiri Olsa32dcd022019-07-21 13:23:51 +02002130 struct evsel *evsel;
Robert Richter7c2f7af2012-08-16 21:10:23 +02002131
2132 if (!event->name)
2133 return;
2134
2135 evsel = perf_evlist__find_by_index(evlist, event->idx);
2136 if (!evsel)
2137 return;
2138
2139 if (evsel->name)
2140 return;
2141
2142 evsel->name = strdup(event->name);
2143}
2144
2145static int
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002146process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
Robert Richter7c2f7af2012-08-16 21:10:23 +02002147{
Namhyung Kim3d7eb862012-09-24 17:15:01 +09002148 struct perf_session *session;
Jiri Olsa32dcd022019-07-21 13:23:51 +02002149 struct evsel *evsel, *events = read_event_desc(ff);
Robert Richter7c2f7af2012-08-16 21:10:23 +02002150
2151 if (!events)
2152 return 0;
2153
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002154 session = container_of(ff->ph, struct perf_session, header);
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07002155
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002156 if (session->data->is_pipe) {
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07002157 /* Save events for reading later by print_event_desc,
2158 * since they can't be read again in pipe mode. */
2159 ff->events = events;
2160 }
2161
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002162 for (evsel = events; evsel->core.attr.size; evsel++)
Robert Richter7c2f7af2012-08-16 21:10:23 +02002163 perf_evlist__set_event_name(session->evlist, evsel);
2164
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002165 if (!session->data->is_pipe)
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07002166 free_event_desc(events);
Robert Richter7c2f7af2012-08-16 21:10:23 +02002167
2168 return 0;
2169}
2170
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002171static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002172{
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002173 char *str, *cmdline = NULL, **argv = NULL;
2174 u32 nr, i, len = 0;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002175
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002176 if (do_read_u32(ff, &nr))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002177 return -1;
2178
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002179 ff->ph->env.nr_cmdline = nr;
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002180
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002181 cmdline = zalloc(ff->size + nr + 1);
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002182 if (!cmdline)
2183 return -1;
2184
2185 argv = zalloc(sizeof(char *) * (nr + 1));
2186 if (!argv)
2187 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002188
2189 for (i = 0; i < nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002190 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002191 if (!str)
2192 goto error;
2193
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002194 argv[i] = cmdline + len;
2195 memcpy(argv[i], str, strlen(str) + 1);
2196 len += strlen(str) + 1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002197 free(str);
2198 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002199 ff->ph->env.cmdline = cmdline;
2200 ff->ph->env.cmdline_argv = (const char **) argv;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002201 return 0;
2202
2203error:
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002204 free(argv);
2205 free(cmdline);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002206 return -1;
2207}
2208
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002209static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002210{
Namhyung Kima1ae5652012-09-24 17:14:59 +09002211 u32 nr, i;
2212 char *str;
2213 struct strbuf sb;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002214 int cpu_nr = ff->ph->env.nr_cpus_avail;
Kan Liang2bb00d22015-09-01 09:58:12 -04002215 u64 size = 0;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002216 struct perf_header *ph = ff->ph;
Thomas Richter01766222018-06-11 09:31:52 +02002217 bool do_core_id_test = true;
Kan Liang2bb00d22015-09-01 09:58:12 -04002218
2219 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
2220 if (!ph->env.cpu)
2221 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002222
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002223 if (do_read_u32(ff, &nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04002224 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002225
Namhyung Kima1ae5652012-09-24 17:14:59 +09002226 ph->env.nr_sibling_cores = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04002227 size += sizeof(u32);
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002228 if (strbuf_init(&sb, 128) < 0)
2229 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002230
2231 for (i = 0; i < nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002232 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002233 if (!str)
2234 goto error;
2235
2236 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002237 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2238 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04002239 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002240 free(str);
2241 }
2242 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
2243
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002244 if (do_read_u32(ff, &nr))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002245 return -1;
2246
Namhyung Kima1ae5652012-09-24 17:14:59 +09002247 ph->env.nr_sibling_threads = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04002248 size += sizeof(u32);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002249
2250 for (i = 0; i < nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002251 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002252 if (!str)
2253 goto error;
2254
2255 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002256 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2257 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04002258 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002259 free(str);
2260 }
2261 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
Kan Liang2bb00d22015-09-01 09:58:12 -04002262
2263 /*
2264 * The header may be from old perf,
2265 * which doesn't include core id and socket id information.
2266 */
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002267 if (ff->size <= size) {
Kan Liang2bb00d22015-09-01 09:58:12 -04002268 zfree(&ph->env.cpu);
2269 return 0;
2270 }
2271
Thomas Richter01766222018-06-11 09:31:52 +02002272 /* On s390 the socket_id number is not related to the numbers of cpus.
2273 * The socket_id number might be higher than the numbers of cpus.
2274 * This depends on the configuration.
Tan Xiaojun0a4d8fb2019-08-02 11:48:57 +08002275 * AArch64 is the same.
Thomas Richter01766222018-06-11 09:31:52 +02002276 */
Tan Xiaojun0a4d8fb2019-08-02 11:48:57 +08002277 if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
2278 || !strncmp(ph->env.arch, "aarch64", 7)))
Thomas Richter01766222018-06-11 09:31:52 +02002279 do_core_id_test = false;
2280
Kan Liang2bb00d22015-09-01 09:58:12 -04002281 for (i = 0; i < (u32)cpu_nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002282 if (do_read_u32(ff, &nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04002283 goto free_cpu;
2284
Kan Liang2bb00d22015-09-01 09:58:12 -04002285 ph->env.cpu[i].core_id = nr;
Kan Liangacae8b32019-06-04 15:50:41 -07002286 size += sizeof(u32);
Kan Liang2bb00d22015-09-01 09:58:12 -04002287
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002288 if (do_read_u32(ff, &nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04002289 goto free_cpu;
2290
Thomas Richter01766222018-06-11 09:31:52 +02002291 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
Kan Liang2bb00d22015-09-01 09:58:12 -04002292 pr_debug("socket_id number is too big."
2293 "You may need to upgrade the perf tool.\n");
2294 goto free_cpu;
2295 }
2296
2297 ph->env.cpu[i].socket_id = nr;
Kan Liangacae8b32019-06-04 15:50:41 -07002298 size += sizeof(u32);
2299 }
2300
2301 /*
2302 * The header may be from old perf,
2303 * which doesn't include die information.
2304 */
2305 if (ff->size <= size)
2306 return 0;
2307
2308 if (do_read_u32(ff, &nr))
2309 return -1;
2310
2311 ph->env.nr_sibling_dies = nr;
2312 size += sizeof(u32);
2313
2314 for (i = 0; i < nr; i++) {
2315 str = do_read_string(ff);
2316 if (!str)
2317 goto error;
2318
2319 /* include a NULL character at the end */
2320 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2321 goto error;
2322 size += string_size(str);
2323 free(str);
2324 }
2325 ph->env.sibling_dies = strbuf_detach(&sb, NULL);
2326
2327 for (i = 0; i < (u32)cpu_nr; i++) {
2328 if (do_read_u32(ff, &nr))
2329 goto free_cpu;
2330
2331 ph->env.cpu[i].die_id = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04002332 }
2333
Namhyung Kima1ae5652012-09-24 17:14:59 +09002334 return 0;
2335
2336error:
2337 strbuf_release(&sb);
Kan Liang2bb00d22015-09-01 09:58:12 -04002338free_cpu:
2339 zfree(&ph->env.cpu);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002340 return -1;
2341}
2342
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002343static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002344{
Jiri Olsac60da222016-07-04 14:16:20 +02002345 struct numa_node *nodes, *n;
Jiri Olsac60da222016-07-04 14:16:20 +02002346 u32 nr, i;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002347 char *str;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002348
2349 /* nr nodes */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002350 if (do_read_u32(ff, &nr))
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002351 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002352
Jiri Olsac60da222016-07-04 14:16:20 +02002353 nodes = zalloc(sizeof(*nodes) * nr);
2354 if (!nodes)
2355 return -ENOMEM;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002356
2357 for (i = 0; i < nr; i++) {
Jiri Olsac60da222016-07-04 14:16:20 +02002358 n = &nodes[i];
2359
Namhyung Kima1ae5652012-09-24 17:14:59 +09002360 /* node number */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002361 if (do_read_u32(ff, &n->node))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002362 goto error;
2363
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002364 if (do_read_u64(ff, &n->mem_total))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002365 goto error;
2366
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002367 if (do_read_u64(ff, &n->mem_free))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002368 goto error;
2369
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002370 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002371 if (!str)
2372 goto error;
2373
Jiri Olsa9c3516d2019-07-21 13:24:30 +02002374 n->map = perf_cpu_map__new(str);
Jiri Olsac60da222016-07-04 14:16:20 +02002375 if (!n->map)
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002376 goto error;
Jiri Olsac60da222016-07-04 14:16:20 +02002377
Namhyung Kima1ae5652012-09-24 17:14:59 +09002378 free(str);
2379 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002380 ff->ph->env.nr_numa_nodes = nr;
2381 ff->ph->env.numa_nodes = nodes;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002382 return 0;
2383
2384error:
Jiri Olsac60da222016-07-04 14:16:20 +02002385 free(nodes);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002386 return -1;
2387}
2388
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002389static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002390{
Namhyung Kima1ae5652012-09-24 17:14:59 +09002391 char *name;
2392 u32 pmu_num;
2393 u32 type;
2394 struct strbuf sb;
2395
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002396 if (do_read_u32(ff, &pmu_num))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002397 return -1;
2398
Namhyung Kima1ae5652012-09-24 17:14:59 +09002399 if (!pmu_num) {
2400 pr_debug("pmu mappings not available\n");
2401 return 0;
2402 }
2403
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002404 ff->ph->env.nr_pmu_mappings = pmu_num;
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002405 if (strbuf_init(&sb, 128) < 0)
2406 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002407
2408 while (pmu_num) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002409 if (do_read_u32(ff, &type))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002410 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002411
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002412 name = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002413 if (!name)
2414 goto error;
2415
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002416 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2417 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002418 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002419 if (strbuf_add(&sb, "", 1) < 0)
2420 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002421
Kan Liange0838e02015-09-10 11:03:05 -03002422 if (!strcmp(name, "msr"))
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002423 ff->ph->env.msr_pmu_type = type;
Kan Liange0838e02015-09-10 11:03:05 -03002424
Namhyung Kima1ae5652012-09-24 17:14:59 +09002425 free(name);
2426 pmu_num--;
2427 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002428 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002429 return 0;
2430
2431error:
2432 strbuf_release(&sb);
2433 return -1;
2434}
2435
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002436static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima8bb5592013-01-22 18:09:31 +09002437{
2438 size_t ret = -1;
2439 u32 i, nr, nr_groups;
2440 struct perf_session *session;
Jiri Olsa32dcd022019-07-21 13:23:51 +02002441 struct evsel *evsel, *leader = NULL;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002442 struct group_desc {
2443 char *name;
2444 u32 leader_idx;
2445 u32 nr_members;
2446 } *desc;
2447
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002448 if (do_read_u32(ff, &nr_groups))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002449 return -1;
2450
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002451 ff->ph->env.nr_groups = nr_groups;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002452 if (!nr_groups) {
2453 pr_debug("group desc not available\n");
2454 return 0;
2455 }
2456
2457 desc = calloc(nr_groups, sizeof(*desc));
2458 if (!desc)
2459 return -1;
2460
2461 for (i = 0; i < nr_groups; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002462 desc[i].name = do_read_string(ff);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002463 if (!desc[i].name)
2464 goto out_free;
2465
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002466 if (do_read_u32(ff, &desc[i].leader_idx))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002467 goto out_free;
2468
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002469 if (do_read_u32(ff, &desc[i].nr_members))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002470 goto out_free;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002471 }
2472
2473 /*
2474 * Rebuild group relationship based on the group_desc
2475 */
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002476 session = container_of(ff->ph, struct perf_session, header);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002477 session->evlist->nr_groups = nr_groups;
2478
2479 i = nr = 0;
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002480 evlist__for_each_entry(session->evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002481 if (evsel->idx == (int) desc[i].leader_idx) {
2482 evsel->leader = evsel;
2483 /* {anon_group} is a dummy name */
Namhyung Kim210e8122013-11-18 11:20:43 +09002484 if (strcmp(desc[i].name, "{anon_group}")) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002485 evsel->group_name = desc[i].name;
Namhyung Kim210e8122013-11-18 11:20:43 +09002486 desc[i].name = NULL;
2487 }
Jiri Olsa5643b1a2019-07-21 13:24:46 +02002488 evsel->core.nr_members = desc[i].nr_members;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002489
2490 if (i >= nr_groups || nr > 0) {
2491 pr_debug("invalid group desc\n");
2492 goto out_free;
2493 }
2494
2495 leader = evsel;
Jiri Olsa5643b1a2019-07-21 13:24:46 +02002496 nr = evsel->core.nr_members - 1;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002497 i++;
2498 } else if (nr) {
2499 /* This is a group member */
2500 evsel->leader = leader;
2501
2502 nr--;
2503 }
2504 }
2505
2506 if (i != nr_groups || nr != 0) {
2507 pr_debug("invalid group desc\n");
2508 goto out_free;
2509 }
2510
2511 ret = 0;
2512out_free:
Namhyung Kim50a27402013-11-18 11:20:44 +09002513 for (i = 0; i < nr_groups; i++)
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -03002514 zfree(&desc[i].name);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002515 free(desc);
2516
2517 return ret;
2518}
2519
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002520static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
Adrian Hunter99fa2982015-04-30 17:37:25 +03002521{
2522 struct perf_session *session;
2523 int err;
2524
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002525 session = container_of(ff->ph, struct perf_session, header);
Adrian Hunter99fa2982015-04-30 17:37:25 +03002526
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002527 err = auxtrace_index__process(ff->fd, ff->size, session,
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002528 ff->ph->needs_swap);
Adrian Hunter99fa2982015-04-30 17:37:25 +03002529 if (err < 0)
2530 pr_err("Failed to process auxtrace index\n");
2531 return err;
2532}
2533
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002534static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
Jiri Olsa720e98b2016-02-16 16:01:43 +01002535{
2536 struct cpu_cache_level *caches;
2537 u32 cnt, i, version;
2538
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002539 if (do_read_u32(ff, &version))
Jiri Olsa720e98b2016-02-16 16:01:43 +01002540 return -1;
2541
Jiri Olsa720e98b2016-02-16 16:01:43 +01002542 if (version != 1)
2543 return -1;
2544
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002545 if (do_read_u32(ff, &cnt))
Jiri Olsa720e98b2016-02-16 16:01:43 +01002546 return -1;
2547
Jiri Olsa720e98b2016-02-16 16:01:43 +01002548 caches = zalloc(sizeof(*caches) * cnt);
2549 if (!caches)
2550 return -1;
2551
2552 for (i = 0; i < cnt; i++) {
2553 struct cpu_cache_level c;
2554
2555 #define _R(v) \
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002556 if (do_read_u32(ff, &c.v))\
Jiri Olsa720e98b2016-02-16 16:01:43 +01002557 goto out_free_caches; \
Jiri Olsa720e98b2016-02-16 16:01:43 +01002558
2559 _R(level)
2560 _R(line_size)
2561 _R(sets)
2562 _R(ways)
2563 #undef _R
2564
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002565 #define _R(v) \
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002566 c.v = do_read_string(ff); \
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002567 if (!c.v) \
Jiri Olsa720e98b2016-02-16 16:01:43 +01002568 goto out_free_caches;
2569
2570 _R(type)
2571 _R(size)
2572 _R(map)
2573 #undef _R
2574
2575 caches[i] = c;
2576 }
2577
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002578 ff->ph->env.caches = caches;
2579 ff->ph->env.caches_cnt = cnt;
Jiri Olsa720e98b2016-02-16 16:01:43 +01002580 return 0;
2581out_free_caches:
2582 free(caches);
2583 return -1;
2584}
2585
Jin Yao60115182017-12-08 21:13:41 +08002586static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2587{
2588 struct perf_session *session;
2589 u64 first_sample_time, last_sample_time;
2590 int ret;
2591
2592 session = container_of(ff->ph, struct perf_session, header);
2593
2594 ret = do_read_u64(ff, &first_sample_time);
2595 if (ret)
2596 return -1;
2597
2598 ret = do_read_u64(ff, &last_sample_time);
2599 if (ret)
2600 return -1;
2601
2602 session->evlist->first_sample_time = first_sample_time;
2603 session->evlist->last_sample_time = last_sample_time;
2604 return 0;
2605}
2606
Jiri Olsae2091ce2018-03-07 16:50:08 +01002607static int process_mem_topology(struct feat_fd *ff,
2608 void *data __maybe_unused)
2609{
2610 struct memory_node *nodes;
2611 u64 version, i, nr, bsize;
2612 int ret = -1;
2613
2614 if (do_read_u64(ff, &version))
2615 return -1;
2616
2617 if (version != 1)
2618 return -1;
2619
2620 if (do_read_u64(ff, &bsize))
2621 return -1;
2622
2623 if (do_read_u64(ff, &nr))
2624 return -1;
2625
2626 nodes = zalloc(sizeof(*nodes) * nr);
2627 if (!nodes)
2628 return -1;
2629
2630 for (i = 0; i < nr; i++) {
2631 struct memory_node n;
2632
2633 #define _R(v) \
2634 if (do_read_u64(ff, &n.v)) \
2635 goto out; \
2636
2637 _R(node)
2638 _R(size)
2639
2640 #undef _R
2641
2642 if (do_read_bitmap(ff, &n.set, &n.size))
2643 goto out;
2644
2645 nodes[i] = n;
2646 }
2647
2648 ff->ph->env.memory_bsize = bsize;
2649 ff->ph->env.memory_nodes = nodes;
2650 ff->ph->env.nr_memory_nodes = nr;
2651 ret = 0;
2652
2653out:
2654 if (ret)
2655 free(nodes);
2656 return ret;
2657}
2658
Alexey Budankovcf790512018-10-09 17:36:24 +03002659static int process_clockid(struct feat_fd *ff,
2660 void *data __maybe_unused)
2661{
2662 if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
2663 return -1;
2664
2665 return 0;
2666}
2667
Jiri Olsa258031c2019-03-08 14:47:39 +01002668static int process_dir_format(struct feat_fd *ff,
2669 void *_data __maybe_unused)
2670{
2671 struct perf_session *session;
2672 struct perf_data *data;
2673
2674 session = container_of(ff->ph, struct perf_session, header);
2675 data = session->data;
2676
2677 if (WARN_ON(!perf_data__is_dir(data)))
2678 return -1;
2679
2680 return do_read_u64(ff, &data->dir.version);
2681}
2682
Song Liu606f9722019-03-11 22:30:43 -07002683#ifdef HAVE_LIBBPF_SUPPORT
2684static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
2685{
2686 struct bpf_prog_info_linear *info_linear;
2687 struct bpf_prog_info_node *info_node;
2688 struct perf_env *env = &ff->ph->env;
2689 u32 count, i;
2690 int err = -1;
2691
2692 if (ff->ph->needs_swap) {
2693 pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
2694 return 0;
2695 }
2696
2697 if (do_read_u32(ff, &count))
2698 return -1;
2699
2700 down_write(&env->bpf_progs.lock);
2701
2702 for (i = 0; i < count; ++i) {
2703 u32 info_len, data_len;
2704
2705 info_linear = NULL;
2706 info_node = NULL;
2707 if (do_read_u32(ff, &info_len))
2708 goto out;
2709 if (do_read_u32(ff, &data_len))
2710 goto out;
2711
2712 if (info_len > sizeof(struct bpf_prog_info)) {
2713 pr_warning("detected invalid bpf_prog_info\n");
2714 goto out;
2715 }
2716
2717 info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
2718 data_len);
2719 if (!info_linear)
2720 goto out;
2721 info_linear->info_len = sizeof(struct bpf_prog_info);
2722 info_linear->data_len = data_len;
2723 if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
2724 goto out;
2725 if (__do_read(ff, &info_linear->info, info_len))
2726 goto out;
2727 if (info_len < sizeof(struct bpf_prog_info))
2728 memset(((void *)(&info_linear->info)) + info_len, 0,
2729 sizeof(struct bpf_prog_info) - info_len);
2730
2731 if (__do_read(ff, info_linear->data, data_len))
2732 goto out;
2733
2734 info_node = malloc(sizeof(struct bpf_prog_info_node));
2735 if (!info_node)
2736 goto out;
2737
2738 /* after reading from file, translate offset to address */
2739 bpf_program__bpil_offs_to_addr(info_linear);
2740 info_node->info_linear = info_linear;
2741 perf_env__insert_bpf_prog_info(env, info_node);
2742 }
2743
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002744 up_write(&env->bpf_progs.lock);
Song Liu606f9722019-03-11 22:30:43 -07002745 return 0;
2746out:
2747 free(info_linear);
2748 free(info_node);
2749 up_write(&env->bpf_progs.lock);
2750 return err;
2751}
2752#else // HAVE_LIBBPF_SUPPORT
2753static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
2754{
2755 return 0;
2756}
2757#endif // HAVE_LIBBPF_SUPPORT
2758
Song Liua70a112312019-03-11 22:30:45 -07002759static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
2760{
2761 struct perf_env *env = &ff->ph->env;
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002762 struct btf_node *node = NULL;
Song Liua70a112312019-03-11 22:30:45 -07002763 u32 count, i;
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002764 int err = -1;
Song Liua70a112312019-03-11 22:30:45 -07002765
2766 if (ff->ph->needs_swap) {
2767 pr_warning("interpreting btf from systems with endianity is not yet supported\n");
2768 return 0;
2769 }
2770
2771 if (do_read_u32(ff, &count))
2772 return -1;
2773
2774 down_write(&env->bpf_progs.lock);
2775
2776 for (i = 0; i < count; ++i) {
Song Liua70a112312019-03-11 22:30:45 -07002777 u32 id, data_size;
2778
2779 if (do_read_u32(ff, &id))
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002780 goto out;
Song Liua70a112312019-03-11 22:30:45 -07002781 if (do_read_u32(ff, &data_size))
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002782 goto out;
Song Liua70a112312019-03-11 22:30:45 -07002783
2784 node = malloc(sizeof(struct btf_node) + data_size);
2785 if (!node)
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002786 goto out;
Song Liua70a112312019-03-11 22:30:45 -07002787
2788 node->id = id;
2789 node->data_size = data_size;
2790
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002791 if (__do_read(ff, node->data, data_size))
2792 goto out;
Song Liua70a112312019-03-11 22:30:45 -07002793
2794 perf_env__insert_btf(env, node);
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002795 node = NULL;
Song Liua70a112312019-03-11 22:30:45 -07002796 }
2797
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002798 err = 0;
2799out:
Song Liua70a112312019-03-11 22:30:45 -07002800 up_write(&env->bpf_progs.lock);
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002801 free(node);
2802 return err;
Song Liua70a112312019-03-11 22:30:45 -07002803}
2804
Alexey Budankov42e1fd82019-03-18 20:41:33 +03002805static int process_compressed(struct feat_fd *ff,
2806 void *data __maybe_unused)
2807{
2808 if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
2809 return -1;
2810
2811 if (do_read_u32(ff, &(ff->ph->env.comp_type)))
2812 return -1;
2813
2814 if (do_read_u32(ff, &(ff->ph->env.comp_level)))
2815 return -1;
2816
2817 if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
2818 return -1;
2819
2820 if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
2821 return -1;
2822
2823 return 0;
2824}
2825
/*
 * Per-feature callback table: how a header feature is written to the
 * perf.data file, printed for a human-readable header dump, and parsed
 * back into the session at read time.
 */
struct feature_ops {
	int (*write)(struct feat_fd *ff, struct evlist *evlist); /* append feature section to ff->fd */
	void (*print)(struct feat_fd *ff, FILE *fp);		 /* human-readable dump to fp */
	int (*process)(struct feat_fd *ff, void *data);		 /* parse feature section on read */
	const char *name;	/* feature name (stringified enum id) */
	bool full_only;		/* only printed in the full (-I) header listing */
	bool synthesize;	/* set via FEAT_OPR() */
};
2834
/*
 * Initializers for one feat_ops[] slot, indexed by the HEADER_* id.
 * FEAT_OPR additionally marks the feature as synthesizable
 * (.synthesize = true); FEAT_OPN leaves .synthesize unset.
 */
#define FEAT_OPR(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func,			\
		.synthesize = true				\
	}

#define FEAT_OPN(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func			\
	}

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

#define process_branch_stack	NULL
#define process_stat		NULL
2861
/*
 * Master table of header features.  Each entry is placed at its
 * HEADER_* index by the FEAT_OPR/FEAT_OPN designated initializers;
 * the third argument is .full_only (printed only with -I).
 */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPR(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
	FEAT_OPR(CLOCKID,	clockid,	false),
	FEAT_OPN(DIR_FORMAT,	dir_format,	false),
	FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,	false),
	FEAT_OPR(BPF_BTF,	bpf_btf,	false),
	FEAT_OPR(COMPRESSED,	compressed,	false),
};
2891
/* Cookie passed through perf_header__process_sections() to the print callback. */
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};
2896
2897static int perf_file_section__fprintf_info(struct perf_file_section *section,
2898 struct perf_header *ph,
2899 int feat, int fd, void *data)
2900{
2901 struct header_print_data *hd = data;
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07002902 struct feat_fd ff;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002903
2904 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2905 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2906 "%d, continuing...\n", section->offset, feat);
2907 return 0;
2908 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002909 if (feat >= HEADER_LAST_FEATURE) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002910 pr_warning("unknown feature %d\n", feat);
Robert Richterf7a8a132011-12-07 10:02:51 +01002911 return 0;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002912 }
2913 if (!feat_ops[feat].print)
2914 return 0;
2915
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07002916 ff = (struct feat_fd) {
2917 .fd = fd,
2918 .ph = ph,
2919 };
2920
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002921 if (!feat_ops[feat].full_only || hd->full)
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07002922 feat_ops[feat].print(&ff, hd->fp);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002923 else
2924 fprintf(hd->fp, "# %s info available, use -I to display\n",
2925 feat_ops[feat].name);
2926
2927 return 0;
2928}
2929
2930int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2931{
2932 struct header_print_data hd;
2933 struct perf_header *header = &session->header;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002934 int fd = perf_data__fd(session->data);
Jiri Olsaf45f5612016-10-10 09:03:07 +02002935 struct stat st;
Arnaldo Carvalho de Melo0afcf292018-12-11 16:11:54 -03002936 time_t stctime;
Jiri Olsaaabae162016-10-10 09:35:50 +02002937 int ret, bit;
Jiri Olsaf45f5612016-10-10 09:03:07 +02002938
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002939 hd.fp = fp;
2940 hd.full = full;
2941
Jiri Olsaf45f5612016-10-10 09:03:07 +02002942 ret = fstat(fd, &st);
2943 if (ret == -1)
2944 return -1;
2945
Arnaldo Carvalho de Melo0afcf292018-12-11 16:11:54 -03002946 stctime = st.st_ctime;
2947 fprintf(fp, "# captured on : %s", ctime(&stctime));
Jiri Olsae971a5a2018-03-07 16:50:03 +01002948
2949 fprintf(fp, "# header version : %u\n", header->version);
2950 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
2951 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
2952 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
Jiri Olsaf45f5612016-10-10 09:03:07 +02002953
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002954 perf_header__process_sections(header, fd, &hd,
2955 perf_file_section__fprintf_info);
Jiri Olsaaabae162016-10-10 09:35:50 +02002956
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002957 if (session->data->is_pipe)
David Carrillo-Cisnerosc9d1c932017-04-10 13:14:32 -07002958 return 0;
2959
Jiri Olsaaabae162016-10-10 09:35:50 +02002960 fprintf(fp, "# missing features: ");
2961 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2962 if (bit)
2963 fprintf(fp, "%s ", feat_ops[bit].name);
2964 }
2965
2966 fprintf(fp, "\n");
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002967 return 0;
2968}
2969
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002970static int do_write_feat(struct feat_fd *ff, int type,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002971 struct perf_file_section **p,
Jiri Olsa63503db2019-07-21 13:23:52 +02002972 struct evlist *evlist)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002973{
2974 int err;
2975 int ret = 0;
2976
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002977 if (perf_header__has_feat(ff->ph, type)) {
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002978 if (!feat_ops[type].write)
2979 return -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002980
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -07002981 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
2982 return -1;
2983
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002984 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002985
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002986 err = feat_ops[type].write(ff, evlist);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002987 if (err < 0) {
Jiri Olsa0c2aff42016-10-10 09:38:02 +02002988 pr_debug("failed to write feature %s\n", feat_ops[type].name);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002989
2990 /* undo anything written */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002991 lseek(ff->fd, (*p)->offset, SEEK_SET);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002992
2993 return -1;
2994 }
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002995 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002996 (*p)++;
2997 }
2998 return ret;
2999}
3000
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003001static int perf_header__adds_write(struct perf_header *header,
Jiri Olsa63503db2019-07-21 13:23:52 +02003002 struct evlist *evlist, int fd)
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003003{
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003004 int nr_sections;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003005 struct feat_fd ff;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02003006 struct perf_file_section *feat_sec, *p;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003007 int sec_size;
3008 u64 sec_start;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003009 int feat;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02003010 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003011
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003012 ff = (struct feat_fd){
3013 .fd = fd,
3014 .ph = header,
3015 };
3016
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003017 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003018 if (!nr_sections)
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003019 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003020
Paul Gortmaker91b98802013-01-30 20:05:49 -05003021 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003022 if (feat_sec == NULL)
3023 return -ENOMEM;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003024
3025 sec_size = sizeof(*feat_sec) * nr_sections;
3026
Jiri Olsa8d541e92013-07-17 19:49:44 +02003027 sec_start = header->feat_offset;
Xiao Guangrongf887f302010-02-04 16:46:42 +08003028 lseek(fd, sec_start + sec_size, SEEK_SET);
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003029
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003030 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003031 if (do_write_feat(&ff, feat, &p, evlist))
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003032 perf_header__clear_feat(header, feat);
3033 }
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003034
Xiao Guangrongf887f302010-02-04 16:46:42 +08003035 lseek(fd, sec_start, SEEK_SET);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02003036 /*
3037 * may write more than needed due to dropped feature, but
Ingo Molnaradba1632018-12-03 11:22:00 +01003038 * this is okay, reader will skip the missing entries
Stephane Eranianfbe96f22011-09-30 15:40:40 +02003039 */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003040 err = do_write(&ff, feat_sec, sec_size);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003041 if (err < 0)
3042 pr_debug("failed to write feature section\n");
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003043 free(feat_sec);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003044 return err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003045}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003046
Tom Zanussi8dc58102010-04-01 23:59:15 -05003047int perf_header__write_pipe(int fd)
3048{
3049 struct perf_pipe_file_header f_header;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003050 struct feat_fd ff;
Tom Zanussi8dc58102010-04-01 23:59:15 -05003051 int err;
3052
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003053 ff = (struct feat_fd){ .fd = fd };
3054
Tom Zanussi8dc58102010-04-01 23:59:15 -05003055 f_header = (struct perf_pipe_file_header){
3056 .magic = PERF_MAGIC,
3057 .size = sizeof(f_header),
3058 };
3059
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003060 err = do_write(&ff, &f_header, sizeof(f_header));
Tom Zanussi8dc58102010-04-01 23:59:15 -05003061 if (err < 0) {
3062 pr_debug("failed to write perf pipe header\n");
3063 return err;
3064 }
3065
3066 return 0;
3067}
3068
/*
 * Write the on-disk perf.data header for @session to @fd:
 *   1. the per-evsel sample id arrays (right after the file header),
 *   2. the perf_file_attr table for every evsel in @evlist,
 *   3. when @at_exit, the feature sections (via perf_header__adds_write),
 *   4. finally the perf_file_header itself, back at offset 0.
 * On return the file position is left at the end of the data area.
 * Returns 0 on success, a negative error otherwise.
 *
 * NOTE: the write order matters — offsets recorded in later structures
 * (id_offset, attr_offset, data/feat offsets) come from lseek() during
 * the earlier passes.
 */
int perf_session__write_header(struct perf_session *session,
			       struct evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	struct perf_header *header = &session->header;
	struct evsel *evsel;
	struct feat_fd ff;
	u64 attr_offset;
	int err;

	ff = (struct feat_fd){ .fd = fd};
	/* leave room for the file header; it is written last */
	lseek(fd, sizeof(f_header), SEEK_SET);

	/* pass 1: sample id arrays, remembering each evsel's offset */
	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(ff.fd, 0, SEEK_CUR);

	/* pass 2: one perf_file_attr per evsel, pointing at its id array */
	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->core.attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	/* data starts here unless a caller already fixed the offset */
	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	/* feature sections are only complete at exit time */
	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->core.nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	/* pass 3: the file header itself, at the start of the file */
	lseek(fd, 0, SEEK_SET);
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
3147
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003148static int perf_header__getbuffer64(struct perf_header *header,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003149 int fd, void *buf, size_t size)
3150{
Arnaldo Carvalho de Melo1e7972c2011-01-03 16:50:55 -02003151 if (readn(fd, buf, size) <= 0)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003152 return -1;
3153
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003154 if (header->needs_swap)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003155 mem_bswap_64(buf, size);
3156
3157 return 0;
3158}
3159
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003160int perf_header__process_sections(struct perf_header *header, int fd,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02003161 void *data,
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003162 int (*process)(struct perf_file_section *section,
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003163 struct perf_header *ph,
3164 int feat, int fd, void *data))
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003165{
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003166 struct perf_file_section *feat_sec, *sec;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003167 int nr_sections;
3168 int sec_size;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003169 int feat;
3170 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003171
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003172 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003173 if (!nr_sections)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003174 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003175
Paul Gortmaker91b98802013-01-30 20:05:49 -05003176 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003177 if (!feat_sec)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003178 return -1;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003179
3180 sec_size = sizeof(*feat_sec) * nr_sections;
3181
Jiri Olsa8d541e92013-07-17 19:49:44 +02003182 lseek(fd, header->feat_offset, SEEK_SET);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003183
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003184 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
3185 if (err < 0)
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003186 goto out_free;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003187
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003188 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
3189 err = process(sec++, header, feat, fd, data);
3190 if (err < 0)
3191 goto out_free;
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +01003192 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003193 err = 0;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003194out_free:
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003195 free(feat_sec);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003196 return err;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003197}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003198
/*
 * On-file perf_event_attr size for each known file-format ABI revision,
 * indexed by revision number.  The trailing 0 terminates iteration in
 * try_all_file_abis().
 */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};
3207
3208/*
3209 * In the legacy file format, the magic number is not used to encode endianness.
3210 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
3211 * on ABI revisions, we need to try all combinations for all endianness to
3212 * detect the endianness.
3213 */
3214static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
3215{
3216 uint64_t ref_size, attr_size;
3217 int i;
3218
3219 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
3220 ref_size = attr_file_abi_sizes[i]
3221 + sizeof(struct perf_file_section);
3222 if (hdr_sz != ref_size) {
3223 attr_size = bswap_64(hdr_sz);
3224 if (attr_size != ref_size)
3225 continue;
3226
3227 ph->needs_swap = true;
3228 }
3229 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
3230 i,
3231 ph->needs_swap);
3232 return 0;
3233 }
3234 /* could not determine endianness */
3235 return -1;
3236}
3237
/* Size in bytes of the legacy (ABI0) pipe-mode header. */
#define PERF_PIPE_HDR_VER0	16

/*
 * Known pipe-mode header sizes, indexed by ABI revision; the trailing 0
 * terminates iteration in try_all_pipe_abis().
 */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
3244
3245/*
3246 * In the legacy pipe format, there is an implicit assumption that endiannesss
3247 * between host recording the samples, and host parsing the samples is the
3248 * same. This is not always the case given that the pipe output may always be
3249 * redirected into a file and analyzed on a different machine with possibly a
3250 * different endianness and perf_event ABI revsions in the perf tool itself.
3251 */
3252static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
3253{
3254 u64 attr_size;
3255 int i;
3256
3257 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
3258 if (hdr_sz != attr_pipe_abi_sizes[i]) {
3259 attr_size = bswap_64(hdr_sz);
3260 if (attr_size != hdr_sz)
3261 continue;
3262
3263 ph->needs_swap = true;
3264 }
3265 pr_debug("Pipe ABI%d perf.data file detected\n", i);
3266 return 0;
3267 }
3268 return -1;
3269}
3270
Feng Tange84ba4e2012-10-30 11:56:07 +08003271bool is_perf_magic(u64 magic)
3272{
3273 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3274 || magic == __perf_magic2
3275 || magic == __perf_magic2_sw)
3276 return true;
3277
3278 return false;
3279}
3280
Stephane Eranian114382a2012-02-09 23:21:08 +01003281static int check_magic_endian(u64 magic, uint64_t hdr_sz,
3282 bool is_pipe, struct perf_header *ph)
Stephane Eranian73323f52012-02-02 13:54:44 +01003283{
3284 int ret;
3285
3286 /* check for legacy format */
Stephane Eranian114382a2012-02-09 23:21:08 +01003287 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
Stephane Eranian73323f52012-02-02 13:54:44 +01003288 if (ret == 0) {
Jiri Olsa2a08c3e2013-07-17 19:49:47 +02003289 ph->version = PERF_HEADER_VERSION_1;
Stephane Eranian73323f52012-02-02 13:54:44 +01003290 pr_debug("legacy perf.data format\n");
Stephane Eranian114382a2012-02-09 23:21:08 +01003291 if (is_pipe)
3292 return try_all_pipe_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01003293
Stephane Eranian114382a2012-02-09 23:21:08 +01003294 return try_all_file_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01003295 }
Stephane Eranian114382a2012-02-09 23:21:08 +01003296 /*
3297 * the new magic number serves two purposes:
3298 * - unique number to identify actual perf.data files
3299 * - encode endianness of file
3300 */
Namhyung Kimf7913972015-01-29 17:06:45 +09003301 ph->version = PERF_HEADER_VERSION_2;
Stephane Eranian73323f52012-02-02 13:54:44 +01003302
Stephane Eranian114382a2012-02-09 23:21:08 +01003303 /* check magic number with one endianness */
3304 if (magic == __perf_magic2)
Stephane Eranian73323f52012-02-02 13:54:44 +01003305 return 0;
3306
Stephane Eranian114382a2012-02-09 23:21:08 +01003307 /* check magic number with opposite endianness */
3308 if (magic != __perf_magic2_sw)
Stephane Eranian73323f52012-02-02 13:54:44 +01003309 return -1;
3310
3311 ph->needs_swap = true;
3312
3313 return 0;
3314}
3315
/*
 * Read and validate the perf.data file header from the start of 'fd',
 * filling both the on-disk image ('header') and the in-memory session
 * header ('ph': adds_features, data_offset/size, feat_offset).
 * Returns 0 on success, -1 on read failure or unrecognized format.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	/* determines ph->version and ph->needs_swap from the magic */
	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		/* swap only the fixed fields; adds_features is handled below */
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		/* still no hostname bit: old file, fall back to buildid only */
		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	/* feature sections start right after the data area */
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}
3387
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003388static int perf_file_section__process(struct perf_file_section *section,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003389 struct perf_header *ph,
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03003390 int feat, int fd, void *data)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003391{
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003392 struct feat_fd fdd = {
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07003393 .fd = fd,
3394 .ph = ph,
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003395 .size = section->size,
3396 .offset = section->offset,
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07003397 };
3398
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003399 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
Arnaldo Carvalho de Melo9486aa32011-01-22 20:37:02 -02003400 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003401 "%d, continuing...\n", section->offset, feat);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003402 return 0;
3403 }
3404
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003405 if (feat >= HEADER_LAST_FEATURE) {
3406 pr_debug("unknown feature %d, continuing...\n", feat);
3407 return 0;
3408 }
3409
Robert Richterf1c67db2012-02-10 15:41:56 +01003410 if (!feat_ops[feat].process)
3411 return 0;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003412
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003413 return feat_ops[feat].process(&fdd, data);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003414}
3415
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003416static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
Tom Zanussi454c4072010-05-01 01:41:20 -05003417 struct perf_header *ph, int fd,
3418 bool repipe)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003419{
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003420 struct feat_fd ff = {
3421 .fd = STDOUT_FILENO,
3422 .ph = ph,
3423 };
Jiri Olsa727ebd52013-11-28 11:30:14 +01003424 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01003425
3426 ret = readn(fd, header, sizeof(*header));
3427 if (ret <= 0)
3428 return -1;
3429
Stephane Eranian114382a2012-02-09 23:21:08 +01003430 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
3431 pr_debug("endian/magic failed\n");
Tom Zanussi8dc58102010-04-01 23:59:15 -05003432 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01003433 }
3434
3435 if (ph->needs_swap)
3436 header->size = bswap_64(header->size);
Tom Zanussi8dc58102010-04-01 23:59:15 -05003437
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003438 if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
Tom Zanussi454c4072010-05-01 01:41:20 -05003439 return -1;
3440
Tom Zanussi8dc58102010-04-01 23:59:15 -05003441 return 0;
3442}
3443
Jiri Olsad4339562013-07-17 19:49:41 +02003444static int perf_header__read_pipe(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05003445{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003446 struct perf_header *header = &session->header;
Tom Zanussi8dc58102010-04-01 23:59:15 -05003447 struct perf_pipe_file_header f_header;
3448
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003449 if (perf_file_header__read_pipe(&f_header, header,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003450 perf_data__fd(session->data),
Tom Zanussi454c4072010-05-01 01:41:20 -05003451 session->repipe) < 0) {
Tom Zanussi8dc58102010-04-01 23:59:15 -05003452 pr_debug("incompatible file format\n");
3453 return -EINVAL;
3454 }
3455
Tom Zanussi8dc58102010-04-01 23:59:15 -05003456 return 0;
3457}
3458
Stephane Eranian69996df2012-02-09 23:21:06 +01003459static int read_attr(int fd, struct perf_header *ph,
3460 struct perf_file_attr *f_attr)
3461{
3462 struct perf_event_attr *attr = &f_attr->attr;
3463 size_t sz, left;
3464 size_t our_sz = sizeof(f_attr->attr);
Jiri Olsa727ebd52013-11-28 11:30:14 +01003465 ssize_t ret;
Stephane Eranian69996df2012-02-09 23:21:06 +01003466
3467 memset(f_attr, 0, sizeof(*f_attr));
3468
3469 /* read minimal guaranteed structure */
3470 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
3471 if (ret <= 0) {
3472 pr_debug("cannot read %d bytes of header attr\n",
3473 PERF_ATTR_SIZE_VER0);
3474 return -1;
3475 }
3476
3477 /* on file perf_event_attr size */
3478 sz = attr->size;
Stephane Eranian114382a2012-02-09 23:21:08 +01003479
Stephane Eranian69996df2012-02-09 23:21:06 +01003480 if (ph->needs_swap)
3481 sz = bswap_32(sz);
3482
3483 if (sz == 0) {
3484 /* assume ABI0 */
3485 sz = PERF_ATTR_SIZE_VER0;
3486 } else if (sz > our_sz) {
3487 pr_debug("file uses a more recent and unsupported ABI"
3488 " (%zu bytes extra)\n", sz - our_sz);
3489 return -1;
3490 }
3491 /* what we have not yet read and that we know about */
3492 left = sz - PERF_ATTR_SIZE_VER0;
3493 if (left) {
3494 void *ptr = attr;
3495 ptr += PERF_ATTR_SIZE_VER0;
3496
3497 ret = readn(fd, ptr, left);
3498 }
3499 /* read perf_file_section, ids are read in caller */
3500 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
3501
3502 return ret <= 0 ? -1 : 0;
3503}
3504
Jiri Olsa32dcd022019-07-21 13:23:51 +02003505static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel,
Tzvetomir Stoyanov (VMware)096177a2018-08-08 14:02:46 -04003506 struct tep_handle *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003507{
Tzvetomir Stoyanov97fbf3f2018-11-30 10:44:07 -05003508 struct tep_event *event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003509 char bf[128];
3510
Namhyung Kim831394b2012-09-06 11:10:46 +09003511 /* already prepared */
3512 if (evsel->tp_format)
3513 return 0;
3514
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09003515 if (pevent == NULL) {
3516 pr_debug("broken or missing trace data\n");
3517 return -1;
3518 }
3519
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003520 event = tep_find_event(pevent, evsel->core.attr.config);
Namhyung Kima7619ae2013-04-18 21:24:16 +09003521 if (event == NULL) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003522 pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003523 return -1;
Namhyung Kima7619ae2013-04-18 21:24:16 +09003524 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003525
Namhyung Kim831394b2012-09-06 11:10:46 +09003526 if (!evsel->name) {
3527 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
3528 evsel->name = strdup(bf);
3529 if (evsel->name == NULL)
3530 return -1;
3531 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003532
Arnaldo Carvalho de Melofcf65bf2012-08-07 09:58:03 -03003533 evsel->tp_format = event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003534 return 0;
3535}
3536
Jiri Olsa63503db2019-07-21 13:23:52 +02003537static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist,
Tzvetomir Stoyanov (VMware)096177a2018-08-08 14:02:46 -04003538 struct tep_handle *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003539{
Jiri Olsa32dcd022019-07-21 13:23:51 +02003540 struct evsel *pos;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003541
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03003542 evlist__for_each_entry(evlist, pos) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003543 if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
Namhyung Kim831394b2012-09-06 11:10:46 +09003544 perf_evsel__prepare_tracepoint_event(pos, pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003545 return -1;
3546 }
3547
3548 return 0;
3549}
3550
/*
 * Parse the perf.data header for a session: create the evlist, read the file
 * header (or hand off to the pipe-mode path), build one evsel per on-file
 * attr with its sample ids, process the feature sections, and prepare
 * tracepoint formats.  Returns 0 on success, -EINVAL/-ENOMEM/-errno on
 * failure; on evlist-related failure the evlist is deleted and NULLed.
 */
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	/* pipe mode has no on-disk header layout; handled separately */
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information. Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	/* a zero attr_size would divide by zero below; treat as corrupt */
	if (f_header.attr_size == 0) {
		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
		       "Was the 'perf record' command properly terminated?\n",
		       data->file.path);
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		/* remember position in the attr array before jumping to ids */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at evlist__delete().
		 */
		evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		/* return to the next attr entry */
		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02003658
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003659int perf_event__synthesize_attr(struct perf_tool *tool,
Robert Richterf4d83432012-08-16 21:10:17 +02003660 struct perf_event_attr *attr, u32 ids, u64 *id,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02003661 perf_event__handler_t process)
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02003662{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003663 union perf_event *ev;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003664 size_t size;
3665 int err;
3666
3667 size = sizeof(struct perf_event_attr);
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003668 size = PERF_ALIGN(size, sizeof(u64));
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003669 size += sizeof(struct perf_event_header);
3670 size += ids * sizeof(u64);
3671
Numfor Mbiziwo-Tiapo20f97812019-07-24 16:44:58 -07003672 ev = zalloc(size);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003673
Chris Samuelce47dc52010-11-13 13:35:06 +11003674 if (ev == NULL)
3675 return -ENOMEM;
3676
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003677 ev->attr.attr = *attr;
3678 memcpy(ev->attr.id, id, ids * sizeof(u64));
3679
3680 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
Robert Richterf4d83432012-08-16 21:10:17 +02003681 ev->attr.header.size = (u16)size;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003682
Robert Richterf4d83432012-08-16 21:10:17 +02003683 if (ev->attr.header.size == size)
3684 err = process(tool, ev, NULL, NULL);
3685 else
3686 err = -E2BIG;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003687
3688 free(ev);
3689
3690 return err;
3691}
3692
/*
 * Synthesize one PERF_RECORD_HEADER_FEATURE event per feature bit set in
 * the session header and pass each to 'process', then emit a final
 * HEADER_LAST_FEATURE marker record.  Used in pipe mode, where no on-disk
 * feature sections exist.  Returns 0 on success or the first non-zero
 * error from 'process'.
 */
int perf_event__synthesize_features(struct perf_tool *tool,
				    struct perf_session *session,
				    struct evlist *evlist,
				    perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct feat_fd ff;
	struct perf_record_header_feature *fe;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;
	ff.ph = &session->header;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		/* only features with a synthesize handler can be piped */
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header :%d\n", feat);
			continue;
		}

		/* reserve room for the feature header at the buffer start */
		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		/* ff.offset is the total bytes written, header included */
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id = HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}
3757
/*
 * Handle an incoming PERF_RECORD_HEADER_FEATURE event (pipe mode): run the
 * matching feat_ops[].process handler on the inline payload and, depending
 * on tool->show_feat_hdr, print the feature information.  Returns 0 on
 * success or when the record is silently ignored, -1 on error.
 */
int perf_event__process_feature(struct perf_session *session,
				union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct feat_fd ff = { .fd = 0 };
	struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	/*
	 * NOTE(review): this rejects an out-of-range feature id, but the
	 * warning text says "record type" -- looks copy-pasted from the
	 * check above; confirm before relying on the message.
	 */
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	/* the payload follows the feature header inline in the event */
	ff.buf  = (void *)fe->data;
	ff.size = event->header.size - sizeof(*fe);
	ff.ph = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}
3799
Jiri Olsa72932372019-08-28 15:57:16 +02003800static struct perf_record_event_update *
Jiri Olsaa6e52812015-10-25 15:51:37 +01003801event_update_event__new(size_t size, u64 type, u64 id)
3802{
Jiri Olsa72932372019-08-28 15:57:16 +02003803 struct perf_record_event_update *ev;
Jiri Olsaa6e52812015-10-25 15:51:37 +01003804
3805 size += sizeof(*ev);
3806 size = PERF_ALIGN(size, sizeof(u64));
3807
3808 ev = zalloc(size);
3809 if (ev) {
3810 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3811 ev->header.size = (u16)size;
3812 ev->type = type;
3813 ev->id = id;
3814 }
3815 return ev;
3816}
3817
3818int
3819perf_event__synthesize_event_update_unit(struct perf_tool *tool,
Jiri Olsa32dcd022019-07-21 13:23:51 +02003820 struct evsel *evsel,
Jiri Olsaa6e52812015-10-25 15:51:37 +01003821 perf_event__handler_t process)
3822{
Jiri Olsa72932372019-08-28 15:57:16 +02003823 struct perf_record_event_update *ev;
Jiri Olsaa6e52812015-10-25 15:51:37 +01003824 size_t size = strlen(evsel->unit);
3825 int err;
3826
3827 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3828 if (ev == NULL)
3829 return -ENOMEM;
3830
Arnaldo Carvalho de Melo75725882018-12-06 11:02:57 -03003831 strlcpy(ev->data, evsel->unit, size + 1);
Jiri Olsaa6e52812015-10-25 15:51:37 +01003832 err = process(tool, (union perf_event *)ev, NULL, NULL);
3833 free(ev);
3834 return err;
3835}
3836
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003837int
3838perf_event__synthesize_event_update_scale(struct perf_tool *tool,
Jiri Olsa32dcd022019-07-21 13:23:51 +02003839 struct evsel *evsel,
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003840 perf_event__handler_t process)
3841{
Jiri Olsa72932372019-08-28 15:57:16 +02003842 struct perf_record_event_update *ev;
3843 struct perf_record_event_update_scale *ev_data;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003844 int err;
3845
3846 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3847 if (ev == NULL)
3848 return -ENOMEM;
3849
Jiri Olsa72932372019-08-28 15:57:16 +02003850 ev_data = (struct perf_record_event_update_scale *)ev->data;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003851 ev_data->scale = evsel->scale;
3852 err = process(tool, (union perf_event*) ev, NULL, NULL);
3853 free(ev);
3854 return err;
3855}
3856
Jiri Olsa802c9042015-10-25 15:51:39 +01003857int
3858perf_event__synthesize_event_update_name(struct perf_tool *tool,
Jiri Olsa32dcd022019-07-21 13:23:51 +02003859 struct evsel *evsel,
Jiri Olsa802c9042015-10-25 15:51:39 +01003860 perf_event__handler_t process)
3861{
Jiri Olsa72932372019-08-28 15:57:16 +02003862 struct perf_record_event_update *ev;
Jiri Olsa802c9042015-10-25 15:51:39 +01003863 size_t len = strlen(evsel->name);
3864 int err;
3865
3866 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3867 if (ev == NULL)
3868 return -ENOMEM;
3869
Arnaldo Carvalho de Melo5192bde2018-12-06 11:09:46 -03003870 strlcpy(ev->data, evsel->name, len + 1);
Jiri Olsa802c9042015-10-25 15:51:39 +01003871 err = process(tool, (union perf_event*) ev, NULL, NULL);
3872 free(ev);
3873 return err;
3874}
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003875
Jiri Olsa86ebb092015-10-25 15:51:40 +01003876int
3877perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
Jiri Olsa32dcd022019-07-21 13:23:51 +02003878 struct evsel *evsel,
Jiri Olsa86ebb092015-10-25 15:51:40 +01003879 perf_event__handler_t process)
3880{
Jiri Olsa72932372019-08-28 15:57:16 +02003881 size_t size = sizeof(struct perf_record_event_update);
3882 struct perf_record_event_update *ev;
Jiri Olsa86ebb092015-10-25 15:51:40 +01003883 int max, err;
3884 u16 type;
3885
Jiri Olsafe1f61b2019-07-21 13:24:38 +02003886 if (!evsel->core.own_cpus)
Jiri Olsa86ebb092015-10-25 15:51:40 +01003887 return 0;
3888
Jiri Olsafe1f61b2019-07-21 13:24:38 +02003889 ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
Jiri Olsa86ebb092015-10-25 15:51:40 +01003890 if (!ev)
3891 return -ENOMEM;
3892
3893 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3894 ev->header.size = (u16)size;
3895 ev->type = PERF_EVENT_UPDATE__CPUS;
3896 ev->id = evsel->id[0];
3897
Jiri Olsa72932372019-08-28 15:57:16 +02003898 cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
Jiri Olsafe1f61b2019-07-21 13:24:38 +02003899 evsel->core.own_cpus,
Jiri Olsa86ebb092015-10-25 15:51:40 +01003900 type, max);
3901
3902 err = process(tool, (union perf_event*) ev, NULL, NULL);
3903 free(ev);
3904 return err;
3905}
3906
Jiri Olsac853f932015-10-25 15:51:41 +01003907size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3908{
Jiri Olsa72932372019-08-28 15:57:16 +02003909 struct perf_record_event_update *ev = &event->event_update;
3910 struct perf_record_event_update_scale *ev_scale;
3911 struct perf_record_event_update_cpus *ev_cpus;
Jiri Olsaf8548392019-07-21 13:23:49 +02003912 struct perf_cpu_map *map;
Jiri Olsac853f932015-10-25 15:51:41 +01003913 size_t ret;
3914
Jiri Olsa5ded0682019-08-28 15:56:57 +02003915 ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id);
Jiri Olsac853f932015-10-25 15:51:41 +01003916
3917 switch (ev->type) {
3918 case PERF_EVENT_UPDATE__SCALE:
Jiri Olsa72932372019-08-28 15:57:16 +02003919 ev_scale = (struct perf_record_event_update_scale *)ev->data;
Jiri Olsac853f932015-10-25 15:51:41 +01003920 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3921 break;
3922 case PERF_EVENT_UPDATE__UNIT:
3923 ret += fprintf(fp, "... unit: %s\n", ev->data);
3924 break;
3925 case PERF_EVENT_UPDATE__NAME:
3926 ret += fprintf(fp, "... name: %s\n", ev->data);
3927 break;
3928 case PERF_EVENT_UPDATE__CPUS:
Jiri Olsa72932372019-08-28 15:57:16 +02003929 ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
Jiri Olsac853f932015-10-25 15:51:41 +01003930 ret += fprintf(fp, "... ");
3931
3932 map = cpu_map__new_data(&ev_cpus->cpus);
3933 if (map)
3934 ret += cpu_map__fprintf(map, fp);
3935 else
3936 ret += fprintf(fp, "failed to get cpus\n");
3937 break;
3938 default:
3939 ret += fprintf(fp, "... unknown type\n");
3940 break;
3941 }
3942
3943 return ret;
3944}
Jiri Olsa86ebb092015-10-25 15:51:40 +01003945
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003946int perf_event__synthesize_attrs(struct perf_tool *tool,
Jiri Olsa63503db2019-07-21 13:23:52 +02003947 struct evlist *evlist,
Jiri Olsa318ec182018-08-30 08:32:15 +02003948 perf_event__handler_t process)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003949{
Jiri Olsa32dcd022019-07-21 13:23:51 +02003950 struct evsel *evsel;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003951 int err = 0;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003952
Jiri Olsa318ec182018-08-30 08:32:15 +02003953 evlist__for_each_entry(evlist, evsel) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003954 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->ids,
Robert Richter6606f872012-08-16 21:10:19 +02003955 evsel->id, process);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003956 if (err) {
3957 pr_debug("failed to create perf header attribute\n");
3958 return err;
3959 }
3960 }
3961
3962 return err;
3963}
3964
Jiri Olsa32dcd022019-07-21 13:23:51 +02003965static bool has_unit(struct evsel *counter)
Andi Kleenbfd8f722017-11-17 13:42:58 -08003966{
3967 return counter->unit && *counter->unit;
3968}
3969
Jiri Olsa32dcd022019-07-21 13:23:51 +02003970static bool has_scale(struct evsel *counter)
Andi Kleenbfd8f722017-11-17 13:42:58 -08003971{
3972 return counter->scale != 1;
3973}
3974
/*
 * Synthesize the event-update records (unit, scale, cpus and - for pipe
 * output - name) that are not carried inside the attr event itself, for
 * every supported evsel in @evsel_list.
 *
 * Returns 0 on success, or the first negative error from a synthesize
 * call (an error message has already been printed in that case).
 */
int perf_event__synthesize_extra_attr(struct perf_tool *tool,
				      struct evlist *evsel_list,
				      perf_event__handler_t process,
				      bool is_pipe)
{
	struct evsel *counter;
	int err;

	/*
	 * Synthesize other events stuff not carried within
	 * attr event - unit, scale, name
	 */
	evlist__for_each_entry(evsel_list, counter) {
		/* skip events that failed to open; they have nothing to report */
		if (!counter->supported)
			continue;

		/*
		 * Synthesize unit and scale only if it's defined.
		 */
		if (has_unit(counter)) {
			err = perf_event__synthesize_event_update_unit(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel unit.\n");
				return err;
			}
		}

		if (has_scale(counter)) {
			err = perf_event__synthesize_event_update_scale(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel counter.\n");
				return err;
			}
		}

		/* only emit a cpus update when the evsel has its own cpu map */
		if (counter->core.own_cpus) {
			err = perf_event__synthesize_event_update_cpus(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel cpus.\n");
				return err;
			}
		}

		/*
		 * Name is needed only for pipe output,
		 * perf.data carries event names.
		 */
		if (is_pipe) {
			err = perf_event__synthesize_event_update_name(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel name.\n");
				return err;
			}
		}
	}
	return 0;
}
4032
/*
 * Handle a PERF_RECORD_HEADER_ATTR event: create an evsel from the attr
 * payload, add it to *@pevlist (allocating the evlist on first use) and
 * register the sample ids that trail the attr in the event.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct evsel *evsel;
	struct evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	/* evlist takes ownership; evsel is freed with the evlist */
	evlist__add(evlist, evsel);

	/* ids array = whatever of the event remains after the attr struct */
	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
	}

	return 0;
}
Tom Zanussicd19a032010-04-01 23:59:20 -05004070
Jiri Olsaffe777252015-10-25 15:51:36 +01004071int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
4072 union perf_event *event,
Jiri Olsa63503db2019-07-21 13:23:52 +02004073 struct evlist **pevlist)
Jiri Olsaffe777252015-10-25 15:51:36 +01004074{
Jiri Olsa72932372019-08-28 15:57:16 +02004075 struct perf_record_event_update *ev = &event->event_update;
4076 struct perf_record_event_update_scale *ev_scale;
4077 struct perf_record_event_update_cpus *ev_cpus;
Jiri Olsa63503db2019-07-21 13:23:52 +02004078 struct evlist *evlist;
Jiri Olsa32dcd022019-07-21 13:23:51 +02004079 struct evsel *evsel;
Jiri Olsaf8548392019-07-21 13:23:49 +02004080 struct perf_cpu_map *map;
Jiri Olsaffe777252015-10-25 15:51:36 +01004081
4082 if (!pevlist || *pevlist == NULL)
4083 return -EINVAL;
4084
4085 evlist = *pevlist;
4086
4087 evsel = perf_evlist__id2evsel(evlist, ev->id);
4088 if (evsel == NULL)
4089 return -EINVAL;
4090
Jiri Olsaa6e52812015-10-25 15:51:37 +01004091 switch (ev->type) {
4092 case PERF_EVENT_UPDATE__UNIT:
4093 evsel->unit = strdup(ev->data);
Jiri Olsadaeecbc2015-10-25 15:51:38 +01004094 break;
Jiri Olsa802c9042015-10-25 15:51:39 +01004095 case PERF_EVENT_UPDATE__NAME:
4096 evsel->name = strdup(ev->data);
4097 break;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01004098 case PERF_EVENT_UPDATE__SCALE:
Jiri Olsa72932372019-08-28 15:57:16 +02004099 ev_scale = (struct perf_record_event_update_scale *)ev->data;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01004100 evsel->scale = ev_scale->scale;
Arnaldo Carvalho de Melo8434a2e2017-02-08 21:57:22 -03004101 break;
Jiri Olsa86ebb092015-10-25 15:51:40 +01004102 case PERF_EVENT_UPDATE__CPUS:
Jiri Olsa72932372019-08-28 15:57:16 +02004103 ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
Jiri Olsa86ebb092015-10-25 15:51:40 +01004104
4105 map = cpu_map__new_data(&ev_cpus->cpus);
4106 if (map)
Jiri Olsafe1f61b2019-07-21 13:24:38 +02004107 evsel->core.own_cpus = map;
Jiri Olsa86ebb092015-10-25 15:51:40 +01004108 else
4109 pr_err("failed to get event_update cpus\n");
Jiri Olsaa6e52812015-10-25 15:51:37 +01004110 default:
4111 break;
4112 }
4113
Jiri Olsaffe777252015-10-25 15:51:36 +01004114 return 0;
4115}
4116
/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA event for pipe output:
 * stage the tracing data in a temp file to learn its size, emit the
 * header event via @process, then stream the data (plus u64 padding)
 * down @fd.
 *
 * Returns the u64-aligned data size written, or -1 on failure.
 *
 * NOTE(review): the return value of process() is ignored here — confirm
 * whether a failing handler should abort the write.
 */
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->core.entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	/* the payload is padded to a u64 boundary; record the padded size */
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	/* append the alignment padding after the copied tracing data */
	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}
4165
/*
 * Handle a PERF_RECORD_HEADER_TRACING_DATA event: parse the tracing
 * data that follows the header in the perf.data stream, consume (and,
 * in repipe mode, forward) the u64 alignment padding, and prime the
 * evlist's tracepoint event handlers.
 *
 * Returns the number of bytes consumed (data + padding), or -1 on
 * read/repipe/size-mismatch errors.
 */
int perf_event__process_tracing_data(struct perf_session *session,
				     union perf_event *event)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	/* trace_report() stops at the raw data end; skip the u64 padding */
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		/* forward the padding so the downstream stream stays aligned */
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	/* the header's recorded size must match what we actually consumed */
	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}
Tom Zanussic7929e42010-04-01 23:59:22 -05004204
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02004205int perf_event__synthesize_build_id(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02004206 struct dso *pos, u16 misc,
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02004207 perf_event__handler_t process,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02004208 struct machine *machine)
Tom Zanussic7929e42010-04-01 23:59:22 -05004209{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02004210 union perf_event ev;
Tom Zanussic7929e42010-04-01 23:59:22 -05004211 size_t len;
4212 int err = 0;
4213
4214 if (!pos->hit)
4215 return err;
4216
4217 memset(&ev, 0, sizeof(ev));
4218
4219 len = pos->long_name_len + 1;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03004220 len = PERF_ALIGN(len, NAME_ALIGN);
Tom Zanussic7929e42010-04-01 23:59:22 -05004221 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
4222 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
4223 ev.build_id.header.misc = misc;
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -03004224 ev.build_id.pid = machine->pid;
Tom Zanussic7929e42010-04-01 23:59:22 -05004225 ev.build_id.header.size = sizeof(ev.build_id) + len;
4226 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
4227
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02004228 err = process(tool, &ev, NULL, machine);
Tom Zanussic7929e42010-04-01 23:59:22 -05004229
4230 return err;
4231}
4232
/*
 * Handle a PERF_RECORD_HEADER_BUILD_ID event by recording the carried
 * build id for the named file via __event_process_build_id().
 * Always reports success.
 */
int perf_event__process_build_id(struct perf_session *session,
				 union perf_event *event)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}