blob: 0a842d9eff226f89aa2710f7dd712125f0869124 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -03002#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -03003#include <inttypes.h>
Arnaldo Carvalho de Meloa0675582017-04-17 16:51:59 -03004#include "string2.h"
Arnaldo Carvalho de Melo391e4202017-04-19 18:51:14 -03005#include <sys/param.h>
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02006#include <sys/types.h>
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02007#include <byteswap.h>
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02008#include <unistd.h>
9#include <stdio.h>
10#include <stdlib.h>
Arnaldo Carvalho de Melo03536312017-06-16 12:18:27 -030011#include <linux/compiler.h>
Frederic Weisbecker8671dab2009-11-11 04:51:03 +010012#include <linux/list.h>
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -020013#include <linux/kernel.h>
Robert Richterb1e5a9b2011-12-07 10:02:57 +010014#include <linux/bitops.h>
Arnaldo Carvalho de Melofc6a1722019-06-25 21:33:14 -030015#include <linux/string.h>
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -070016#include <linux/stringify.h>
Arnaldo Carvalho de Melo7f7c5362019-07-04 11:32:27 -030017#include <linux/zalloc.h>
Arnaldo Carvalho de Melo7a8ef4c2017-04-19 20:57:47 -030018#include <sys/stat.h>
Stephane Eranianfbe96f22011-09-30 15:40:40 +020019#include <sys/utsname.h>
Jin Yao60115182017-12-08 21:13:41 +080020#include <linux/time64.h>
Jiri Olsae2091ce2018-03-07 16:50:08 +010021#include <dirent.h>
Song Liu606f9722019-03-11 22:30:43 -070022#include <bpf/libbpf.h>
Jiri Olsa9c3516d2019-07-21 13:24:30 +020023#include <perf/cpumap.h>
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020024
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020025#include "evlist.h"
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -030026#include "evsel.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020027#include "header.h"
Arnaldo Carvalho de Melo98521b32017-04-25 15:45:35 -030028#include "memswap.h"
Frederic Weisbecker03456a12009-10-06 23:36:47 +020029#include "../perf.h"
30#include "trace-event.h"
Arnaldo Carvalho de Melo301a0b02009-12-13 19:50:25 -020031#include "session.h"
Frederic Weisbecker8671dab2009-11-11 04:51:03 +010032#include "symbol.h"
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +010033#include "debug.h"
Stephane Eranianfbe96f22011-09-30 15:40:40 +020034#include "cpumap.h"
Robert Richter50a96672012-08-16 21:10:24 +020035#include "pmu.h"
Jiri Olsa7dbf4dc2012-09-10 18:50:19 +020036#include "vdso.h"
Namhyung Kima1ae5652012-09-24 17:14:59 +090037#include "strbuf.h"
Jiri Olsaebb296c2012-10-27 23:18:28 +020038#include "build-id.h"
Jiri Olsacc9784bd2013-10-15 16:27:34 +020039#include "data.h"
Jiri Olsa720e98b2016-02-16 16:01:43 +010040#include <api/fs/fs.h>
41#include "asm/bug.h"
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -070042#include "tool.h"
Jin Yao60115182017-12-08 21:13:41 +080043#include "time-utils.h"
Jiri Olsae2091ce2018-03-07 16:50:08 +010044#include "units.h"
Arnaldo Carvalho de Melo2da39f12019-08-27 11:51:18 -030045#include "util.h"
Jiri Olsa5135d5e2019-02-19 10:58:13 +010046#include "cputopo.h"
Song Liu606f9722019-03-11 22:30:43 -070047#include "bpf-event.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020048
Arnaldo Carvalho de Melo3052ba52019-06-25 17:27:31 -030049#include <linux/ctype.h>
Arnaldo Carvalho de Melo3d689ed2017-04-17 16:10:49 -030050
Stephane Eranian73323f52012-02-02 13:54:44 +010051/*
52 * magic2 = "PERFILE2"
53 * must be a numerical value to let the endianness
54 * determine the memory layout. That way we are able
55 * to detect endianness when reading the perf.data file
56 * back.
57 *
58 * we check for legacy (PERFFILE) format.
59 */
60static const char *__perf_magic1 = "PERFFILE";
61static const u64 __perf_magic2 = 0x32454c4946524550ULL;
62static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020063
Stephane Eranian73323f52012-02-02 13:54:44 +010064#define PERF_MAGIC __perf_magic2
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020065
Soramichi AKIYAMAd25ed5d2017-01-17 00:22:37 +090066const char perf_version_string[] = PERF_VERSION;
67
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020068struct perf_file_attr {
Ingo Molnarcdd6c482009-09-21 12:02:48 +020069 struct perf_event_attr attr;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020070 struct perf_file_section ids;
71};
72
/*
 * Destination/source of a single feature section: either a file
 * descriptor (file mode) or a growable in-memory buffer (pipe mode).
 */
struct feat_fd {
	struct perf_header	*ph;	/* header the feature belongs to */
	int			fd;
	void		*buf;	/* Either buf != NULL or fd >= 0 */
	ssize_t		offset;	/* write/read position inside buf */
	size_t		size;	/* currently allocated size of buf */
	struct evsel	*events;	/* NOTE(review): consumer not visible in this chunk — presumably filled by event_desc processing; confirm */
};
81
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030082void perf_header__set_feat(struct perf_header *header, int feat)
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020083{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030084 set_bit(feat, header->adds_features);
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020085}
86
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030087void perf_header__clear_feat(struct perf_header *header, int feat)
Arnaldo Carvalho de Melobaa2f6c2010-11-26 19:39:15 -020088{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030089 clear_bit(feat, header->adds_features);
Arnaldo Carvalho de Melobaa2f6c2010-11-26 19:39:15 -020090}
91
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030092bool perf_header__has_feat(const struct perf_header *header, int feat)
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020093{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -030094 return test_bit(feat, header->adds_features);
Arnaldo Carvalho de Melo8d063672009-11-04 18:50:43 -020095}
96
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -070097static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
98{
99 ssize_t ret = writen(ff->fd, buf, size);
100
101 if (ret != (ssize_t)size)
102 return ret < 0 ? (int)ret : -1;
103 return 0;
104}
105
/*
 * Pipe-mode backend of do_write(): append @size bytes to the in-memory
 * buffer, growing it geometrically as needed.  The total section size is
 * capped because it must fit in struct perf_event_header::size (u16).
 */
static int __do_write_buf(struct feat_fd *ff,  const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	/*
	 * Double until the payload fits.
	 * NOTE(review): assumes ff->size > 0 here, otherwise this loop
	 * would never terminate — confirm the buffer is pre-allocated
	 * by the caller that sets up pipe mode.
	 */
	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}
133
David Carrillo-Cisneros2ff53652017-07-17 21:25:36 -0700134/* Return: 0 if succeded, -ERR if failed. */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700135int do_write(struct feat_fd *ff, const void *buf, size_t size)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +0200136{
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700137 if (!ff->buf)
138 return __do_write_fd(ff, buf, size);
139 return __do_write_buf(ff, buf, size);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +0200140}
141
/*
 * Serialize a bitmap as its bit count (u64) followed by the raw words,
 * written one u64 at a time so do_write() can byte-swap-proof the stream.
 * Return: 0 if succeeded, -ERR if failed.
 */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	/* One 64-bit word per BITS_TO_U64 chunk of the bitmap. */
	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}
160
161/* Return: 0 if succeded, -ERR if failed. */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700162int write_padded(struct feat_fd *ff, const void *bf,
163 size_t count, size_t count_aligned)
Arnaldo Carvalho de Melof92cb242010-01-04 16:19:28 -0200164{
165 static const char zero_buf[NAME_ALIGN];
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700166 int err = do_write(ff, bf, count);
Arnaldo Carvalho de Melof92cb242010-01-04 16:19:28 -0200167
168 if (!err)
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700169 err = do_write(ff, zero_buf, count_aligned - count);
Arnaldo Carvalho de Melof92cb242010-01-04 16:19:28 -0200170
171 return err;
172}
173
/*
 * On-disk footprint of a string feature: the u32 length prefix plus the
 * NUL-terminated, NAME_ALIGN-padded payload (matches do_write_string()).
 */
#define string_size(str) \
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
176
David Carrillo-Cisneros2ff53652017-07-17 21:25:36 -0700177/* Return: 0 if succeded, -ERR if failed. */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700178static int do_write_string(struct feat_fd *ff, const char *str)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200179{
180 u32 len, olen;
181 int ret;
182
183 olen = strlen(str) + 1;
Irina Tirdea9ac3e482012-09-11 01:15:01 +0300184 len = PERF_ALIGN(olen, NAME_ALIGN);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200185
186 /* write len, incl. \0 */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700187 ret = do_write(ff, &len, sizeof(len));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200188 if (ret < 0)
189 return ret;
190
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700191 return write_padded(ff, str, olen, len);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200192}
193
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700194static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700195{
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700196 ssize_t ret = readn(ff->fd, addr, size);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700197
198 if (ret != size)
199 return ret < 0 ? (int)ret : -1;
200 return 0;
201}
202
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700203static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
204{
205 if (size > (ssize_t)ff->size - ff->offset)
206 return -1;
207
208 memcpy(addr, ff->buf + ff->offset, size);
209 ff->offset += size;
210
211 return 0;
212
213}
214
215static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
216{
217 if (!ff->buf)
218 return __do_read_fd(ff, addr, size);
219 return __do_read_buf(ff, addr, size);
220}
221
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700222static int do_read_u32(struct feat_fd *ff, u32 *addr)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700223{
224 int ret;
225
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700226 ret = __do_read(ff, addr, sizeof(*addr));
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700227 if (ret)
228 return ret;
229
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700230 if (ff->ph->needs_swap)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700231 *addr = bswap_32(*addr);
232 return 0;
233}
234
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700235static int do_read_u64(struct feat_fd *ff, u64 *addr)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700236{
237 int ret;
238
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700239 ret = __do_read(ff, addr, sizeof(*addr));
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700240 if (ret)
241 return ret;
242
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700243 if (ff->ph->needs_swap)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700244 *addr = bswap_64(*addr);
245 return 0;
246}
247
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700248static char *do_read_string(struct feat_fd *ff)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200249{
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200250 u32 len;
251 char *buf;
252
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700253 if (do_read_u32(ff, &len))
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200254 return NULL;
255
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200256 buf = malloc(len);
257 if (!buf)
258 return NULL;
259
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700260 if (!__do_read(ff, buf, len)) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200261 /*
262 * strings are padded by zeroes
263 * thus the actual strlen of buf
264 * may be less than len
265 */
266 return buf;
267 }
268
269 free(buf);
270 return NULL;
271}
272
Jiri Olsae2091ce2018-03-07 16:50:08 +0100273/* Return: 0 if succeded, -ERR if failed. */
274static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
275{
276 unsigned long *set;
277 u64 size, *p;
278 int i, ret;
279
280 ret = do_read_u64(ff, &size);
281 if (ret)
282 return ret;
283
284 set = bitmap_alloc(size);
285 if (!set)
286 return -ENOMEM;
287
Jiri Olsae2091ce2018-03-07 16:50:08 +0100288 p = (u64 *) set;
289
290 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
291 ret = do_read_u64(ff, p + i);
292 if (ret < 0) {
293 free(set);
294 return ret;
295 }
296 }
297
298 *pset = set;
299 *psize = size;
300 return 0;
301}
302
/*
 * HEADER_TRACING_DATA writer: dump tracepoint metadata for all events in
 * @evlist straight to the fd.  Cannot work in pipe mode because
 * read_tracing_data() needs a real file descriptor.
 */
static int write_tracing_data(struct feat_fd *ff,
			      struct evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->core.entries);
}
311
/*
 * HEADER_BUILD_ID writer: collect build-ids for the DSOs hit by this
 * session, write the table, and populate the on-disk build-id cache.
 */
static int write_build_id(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	/* feat_fd only carries the header; recover the owning session. */
	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	/* Table writing below relies on a seekable fd, not a buffer. */
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}
335
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700336static int write_hostname(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200337 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200338{
339 struct utsname uts;
340 int ret;
341
342 ret = uname(&uts);
343 if (ret < 0)
344 return -1;
345
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700346 return do_write_string(ff, uts.nodename);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200347}
348
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700349static int write_osrelease(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200350 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200351{
352 struct utsname uts;
353 int ret;
354
355 ret = uname(&uts);
356 if (ret < 0)
357 return -1;
358
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700359 return do_write_string(ff, uts.release);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200360}
361
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700362static int write_arch(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200363 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200364{
365 struct utsname uts;
366 int ret;
367
368 ret = uname(&uts);
369 if (ret < 0)
370 return -1;
371
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700372 return do_write_string(ff, uts.machine);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200373}
374
/* HEADER_VERSION writer: record the perf tool's version string. */
static int write_version(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}
380
/*
 * Scan /proc/cpuinfo for the first line starting with @cpuinfo_proc
 * (e.g. "model name"), strip the "key:" prefix and trailing newline,
 * collapse runs of whitespace, and write the result as a string feature.
 * Returns do_write_string()'s result, or -1 if the key was not found.
 */
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	/* ret becomes 0 only when a line matches the key prefix. */
	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	/* Skip past "key: " so s points at the value itself. */
	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);
			*p = ' ';
			/* Shift the tail left over the extra spaces, in place. */
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
435
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700436static int write_cpudesc(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200437 struct evlist *evlist __maybe_unused)
Wang Nan493c3032014-10-24 09:45:26 +0800438{
Wang Nan493c3032014-10-24 09:45:26 +0800439 const char *cpuinfo_procs[] = CPUINFO_PROC;
440 unsigned int i;
441
442 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
443 int ret;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700444 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
Wang Nan493c3032014-10-24 09:45:26 +0800445 if (ret >= 0)
446 return ret;
447 }
448 return -1;
449}
450
451
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700452static int write_nrcpus(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200453 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200454{
455 long nr;
456 u32 nrc, nra;
457 int ret;
458
Jan Stancekda8a58b2017-02-17 12:10:26 +0100459 nrc = cpu__max_present_cpu();
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200460
461 nr = sysconf(_SC_NPROCESSORS_ONLN);
462 if (nr < 0)
463 return -1;
464
465 nra = (u32)(nr & UINT_MAX);
466
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700467 ret = do_write(ff, &nrc, sizeof(nrc));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200468 if (ret < 0)
469 return ret;
470
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700471 return do_write(ff, &nra, sizeof(nra));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200472}
473
/*
 * HEADER_EVENT_DESC writer.  Layout:
 *   u32 nre;                     number of events
 *   u32 sz;                      sizeof(perf_event_attr) as written
 *   per event: attr bytes, u32 nr_ids, string name, u64 ids[nr_ids]
 */
static int write_event_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	struct evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->core.nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->core.attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->core.attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
529
/*
 * HEADER_CMDLINE writer: the argv count (perf binary path + recorded
 * arguments), then the perf executable path, then each argument string.
 */
static int write_cmdline(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	char pbuf[MAXPATHLEN], *buf;
	int i, ret, n;

	/* actual path to perf binary */
	buf = perf_exe(pbuf, MAXPATHLEN);

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}
557
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200558
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700559static int write_cpu_topology(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200560 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200561{
Jiri Olsa5135d5e2019-02-19 10:58:13 +0100562 struct cpu_topology *tp;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200563 u32 i;
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300564 int ret, j;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200565
Jiri Olsa5135d5e2019-02-19 10:58:13 +0100566 tp = cpu_topology__new();
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200567 if (!tp)
568 return -1;
569
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700570 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200571 if (ret < 0)
572 goto done;
573
574 for (i = 0; i < tp->core_sib; i++) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700575 ret = do_write_string(ff, tp->core_siblings[i]);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200576 if (ret < 0)
577 goto done;
578 }
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700579 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200580 if (ret < 0)
581 goto done;
582
583 for (i = 0; i < tp->thread_sib; i++) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700584 ret = do_write_string(ff, tp->thread_siblings[i]);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200585 if (ret < 0)
586 break;
587 }
Kan Liang2bb00d22015-09-01 09:58:12 -0400588
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300589 ret = perf_env__read_cpu_topology_map(&perf_env);
590 if (ret < 0)
591 goto done;
592
593 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700594 ret = do_write(ff, &perf_env.cpu[j].core_id,
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300595 sizeof(perf_env.cpu[j].core_id));
Kan Liang2bb00d22015-09-01 09:58:12 -0400596 if (ret < 0)
597 return ret;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700598 ret = do_write(ff, &perf_env.cpu[j].socket_id,
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300599 sizeof(perf_env.cpu[j].socket_id));
Kan Liang2bb00d22015-09-01 09:58:12 -0400600 if (ret < 0)
601 return ret;
602 }
Kan Liangacae8b32019-06-04 15:50:41 -0700603
604 if (!tp->die_sib)
605 goto done;
606
607 ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
608 if (ret < 0)
609 goto done;
610
611 for (i = 0; i < tp->die_sib; i++) {
612 ret = do_write_string(ff, tp->die_siblings[i]);
613 if (ret < 0)
614 goto done;
615 }
616
617 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
618 ret = do_write(ff, &perf_env.cpu[j].die_id,
619 sizeof(perf_env.cpu[j].die_id));
620 if (ret < 0)
621 return ret;
622 }
623
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200624done:
Jiri Olsa5135d5e2019-02-19 10:58:13 +0100625 cpu_topology__delete(tp);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200626 return ret;
627}
628
629
630
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700631static int write_total_mem(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200632 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200633{
634 char *buf = NULL;
635 FILE *fp;
636 size_t len = 0;
637 int ret = -1, n;
638 uint64_t mem;
639
640 fp = fopen("/proc/meminfo", "r");
641 if (!fp)
642 return -1;
643
644 while (getline(&buf, &len, fp) > 0) {
645 ret = strncmp(buf, "MemTotal:", 9);
646 if (!ret)
647 break;
648 }
649 if (!ret) {
650 n = sscanf(buf, "%*s %"PRIu64, &mem);
651 if (n == 1)
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700652 ret = do_write(ff, &mem, sizeof(mem));
Wang Naned307752014-10-16 11:08:29 +0800653 } else
654 ret = -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200655 free(buf);
656 fclose(fp);
657 return ret;
658}
659
/*
 * HEADER_NUMA_TOPOLOGY writer: node count, then per node its id,
 * total/free memory (u64 each) and the node's CPU list string.
 */
static int write_numa_topology(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct numa_topology *tp;
	int ret = -1;
	u32 i;

	tp = numa_topology__new();
	if (!tp)
		return -ENOMEM;

	ret = do_write(ff, &tp->nr, sizeof(u32));
	if (ret < 0)
		goto err;

	for (i = 0; i < tp->nr; i++) {
		struct numa_topology_node *n = &tp->nodes[i];

		ret = do_write(ff, &n->node, sizeof(u32));
		if (ret < 0)
			goto err;

		ret = do_write(ff, &n->mem_total, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write(ff, &n->mem_free, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write_string(ff, n->cpus);
		if (ret < 0)
			goto err;
	}

	ret = 0;

err:
	/* All paths release the topology before returning. */
	numa_topology__delete(tp);
	return ret;
}
701
702/*
Robert Richter50a96672012-08-16 21:10:24 +0200703 * File format:
704 *
705 * struct pmu_mappings {
706 * u32 pmu_num;
707 * struct pmu_map {
708 * u32 type;
709 * char name[];
710 * }[pmu_num];
711 * };
712 */
713
/*
 * HEADER_PMU_MAPPINGS writer: u32 count of named PMUs, then for each a
 * u32 type and its name string (see the pmu_mappings layout comment
 * above this function).
 */
static int write_pmu_mappings(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

	/*
	 * Do a first pass to count number of pmu to avoid lseek so this
	 * works in pipe mode as well.
	 */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	/* Second pass: emit the (type, name) pairs counted above. */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;

		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}
750
751/*
Namhyung Kima8bb5592013-01-22 18:09:31 +0900752 * File format:
753 *
754 * struct group_descs {
755 * u32 nr_groups;
756 * struct group_desc {
757 * char name[];
758 * u32 leader_idx;
759 * u32 nr_members;
760 * }[nr_groups];
761 * };
762 */
/*
 * HEADER_GROUP_DESC writer: u32 group count, then for each multi-member
 * group its name, leader index and member count (see the group_descs
 * layout comment above this function).
 */
static int write_group_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		/* Only real groups (leader with >1 member) are recorded. */
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->core.nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->core.nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
796
/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be use to match the architecture's "mapfile".
 */
/* Weak default: no arch override present, so no cpuid string is available. */
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
	return NULL;
}
807
808/* Return zero when the cpuid from the mapfile.csv matches the
809 * cpuid string generated on this platform.
810 * Otherwise return non-zero.
811 */
812int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
813{
814 regex_t re;
815 regmatch_t pmatch[1];
816 int match;
817
818 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
819 /* Warn unable to generate match particular string. */
820 pr_info("Invalid regular expression %s\n", mapcpuid);
821 return 1;
822 }
823
824 match = !regexec(&re, cpuid, 1, pmatch, 0);
825 regfree(&re);
826 if (match) {
827 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
828
829 /* Verify the entire string matched. */
830 if (match_len == strlen(cpuid))
831 return 0;
832 }
833 return 1;
834}
835
/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	/* -1 signals "no cpuid available on this architecture". */
	return -1;
}
844
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700845static int write_cpuid(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200846 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200847{
848 char buffer[64];
849 int ret;
850
851 ret = get_cpuid(buffer, sizeof(buffer));
Jiri Olsaa9aeb872019-02-13 13:32:43 +0100852 if (ret)
853 return -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200854
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700855 return do_write_string(ff, buffer);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200856}
857
/* BRANCH_STACK carries no payload; writing the section is a no-op. */
static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	return 0;
}
863
/*
 * Write the AUXTRACE feature section (the auxtrace index).  Requires a
 * seekable output: auxtrace_index__write() takes a real fd, so calling
 * this in pipe mode (ff->buf set) is a programming error.
 */
static int write_auxtrace(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}
880
/* Write the CLOCKID feature section: the clock resolution in nanoseconds. */
static int write_clockid(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write(ff, &ff->ph->env.clockid_res_ns,
			sizeof(ff->ph->env.clockid_res_ns));
}
887
/*
 * Write the DIR_FORMAT feature section: the directory-data version.
 * Only valid when the session data is in directory mode.
 */
static int write_dir_format(struct feat_fd *ff,
			    struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	if (WARN_ON(!perf_data__is_dir(data)))
		return -1;

	return do_write(ff, &data->dir.version, sizeof(data->dir.version));
}
902
#ifdef HAVE_LIBBPF_SUPPORT
/*
 * Write the BPF_PROG_INFO feature section: the count followed by each
 * bpf_prog_info_linear record from the env rb-tree.  The tree is walked
 * under the bpf_progs read lock; pointers are converted to file offsets
 * for the write and restored afterwards, so env data is left unchanged.
 */
static int write_bpf_prog_info(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.infos_cnt,
		       sizeof(env->bpf_progs.infos_cnt));
	if (ret < 0)
		goto out;

	root = &env->bpf_progs.infos;
	next = rb_first(root);
	while (next) {
		struct bpf_prog_info_node *node;
		size_t len;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		len = sizeof(struct bpf_prog_info_linear) +
			node->info_linear->data_len;

		/* before writing to file, translate address to offset */
		bpf_program__bpil_addr_to_offs(node->info_linear);
		ret = do_write(ff, node->info_linear, len);
		/*
		 * translate back to address even when do_write() fails,
		 * so that this function never changes the data.
		 */
		bpf_program__bpil_offs_to_addr(node->info_linear);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}
#else // HAVE_LIBBPF_SUPPORT
/* Without libbpf there is nothing to record; succeed with an empty section. */
static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
			       struct evlist *evlist __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
952
/*
 * Write the BPF_BTF feature section: the count followed by each BTF
 * node (id, data_size, then data_size bytes of raw BTF), walked under
 * the bpf_progs read lock.
 */
static int write_bpf_btf(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.btfs_cnt,
		       sizeof(env->bpf_progs.btfs_cnt));

	if (ret < 0)
		goto out;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);
	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		/* id + data_size + the raw BTF blob in one contiguous write. */
		ret = do_write(ff, &node->id,
			       sizeof(u32) * 2 + node->data_size);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}
985
Jiri Olsa720e98b2016-02-16 16:01:43 +0100986static int cpu_cache_level__sort(const void *a, const void *b)
987{
988 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
989 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
990
991 return cache_a->level - cache_b->level;
992}
993
994static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
995{
996 if (a->level != b->level)
997 return false;
998
999 if (a->line_size != b->line_size)
1000 return false;
1001
1002 if (a->sets != b->sets)
1003 return false;
1004
1005 if (a->ways != b->ways)
1006 return false;
1007
1008 if (strcmp(a->type, b->type))
1009 return false;
1010
1011 if (strcmp(a->size, b->size))
1012 return false;
1013
1014 if (strcmp(a->map, b->map))
1015 return false;
1016
1017 return true;
1018}
1019
/*
 * Fill @cache from /sys/devices/system/cpu/cpu<cpu>/cache/index<level>/.
 *
 * Returns 0 on success, 1 when that cache index does not exist (the
 * caller uses this to stop probing further levels), and -1 on a read
 * failure.  On -1, any strings already allocated are freed, so the
 * caller never owns a partially-filled cache.
 */
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	/* Missing index directory => no such cache level on this CPU. */
	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	/* Terminate and trim the sysfs string (it ends with a newline). */
	cache->type[len] = 0;
	cache->type = strim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		zfree(&cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = strim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		zfree(&cache->map);
		zfree(&cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = strim(cache->map);
	return 0;
}
1075
/* Print one cache description, e.g. "L1 Data 32K [0-1]". */
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
1080
/*
 * Probe every configured CPU for its caches and collect the distinct
 * descriptions into @caches (at most @size entries); duplicates shared
 * between CPUs are stored once.  The number collected is returned via
 * @cntp.  Returns 0 on success, negative on a sysfs read failure.
 */
static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		/* Probe cache index levels until one is missing (err == 1). */
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			/* Keep only one copy of each distinct description. */
			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}
1124
#define MAX_CACHES (MAX_NR_CPUS * 4)

/*
 * Write the CACHE feature section: a version word, the cache count, and
 * for each cache its numeric fields followed by its strings, sorted by
 * level.  All collected descriptions are freed before returning.
 */
static int write_cache(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		/* Emit the u32 fields, bailing out on the first failure. */
		#define _W(v)					\
			ret = do_write(ff, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		/* Emit the string fields the same way. */
		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}
1178
/* STAT carries no payload; the feature bit itself is the information. */
static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct evlist *evlist __maybe_unused)
{
	return 0;
}
1184
/*
 * Write the SAMPLE_TIME feature section: the timestamps of the first
 * and last samples in the evlist.
 */
static int write_sample_time(struct feat_fd *ff,
			     struct evlist *evlist)
{
	int ret;

	ret = do_write(ff, &evlist->first_sample_time,
		       sizeof(evlist->first_sample_time));
	if (ret < 0)
		return ret;

	return do_write(ff, &evlist->last_sample_time,
			sizeof(evlist->last_sample_time));
}
1198
Jiri Olsae2091ce2018-03-07 16:50:08 +01001199
1200static int memory_node__read(struct memory_node *n, unsigned long idx)
1201{
1202 unsigned int phys, size = 0;
1203 char path[PATH_MAX];
1204 struct dirent *ent;
1205 DIR *dir;
1206
1207#define for_each_memory(mem, dir) \
1208 while ((ent = readdir(dir))) \
1209 if (strcmp(ent->d_name, ".") && \
1210 strcmp(ent->d_name, "..") && \
1211 sscanf(ent->d_name, "memory%u", &mem) == 1)
1212
1213 scnprintf(path, PATH_MAX,
1214 "%s/devices/system/node/node%lu",
1215 sysfs__mountpoint(), idx);
1216
1217 dir = opendir(path);
1218 if (!dir) {
1219 pr_warning("failed: cant' open memory sysfs data\n");
1220 return -1;
1221 }
1222
1223 for_each_memory(phys, dir) {
1224 size = max(phys, size);
1225 }
1226
1227 size++;
1228
1229 n->set = bitmap_alloc(size);
1230 if (!n->set) {
1231 closedir(dir);
1232 return -ENOMEM;
1233 }
1234
Jiri Olsae2091ce2018-03-07 16:50:08 +01001235 n->node = idx;
1236 n->size = size;
1237
1238 rewinddir(dir);
1239
1240 for_each_memory(phys, dir) {
1241 set_bit(phys, n->set);
1242 }
1243
1244 closedir(dir);
1245 return 0;
1246}
1247
1248static int memory_node__sort(const void *a, const void *b)
1249{
1250 const struct memory_node *na = a;
1251 const struct memory_node *nb = b;
1252
1253 return na->node - nb->node;
1254}
1255
1256static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1257{
1258 char path[PATH_MAX];
1259 struct dirent *ent;
1260 DIR *dir;
1261 u64 cnt = 0;
1262 int ret = 0;
1263
1264 scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1265 sysfs__mountpoint());
1266
1267 dir = opendir(path);
1268 if (!dir) {
Thomas Richter4f75f1cb2018-04-12 15:32:46 +02001269 pr_debug2("%s: could't read %s, does this arch have topology information?\n",
1270 __func__, path);
Jiri Olsae2091ce2018-03-07 16:50:08 +01001271 return -1;
1272 }
1273
1274 while (!ret && (ent = readdir(dir))) {
1275 unsigned int idx;
1276 int r;
1277
1278 if (!strcmp(ent->d_name, ".") ||
1279 !strcmp(ent->d_name, ".."))
1280 continue;
1281
1282 r = sscanf(ent->d_name, "node%u", &idx);
1283 if (r != 1)
1284 continue;
1285
1286 if (WARN_ONCE(cnt >= size,
1287 "failed to write MEM_TOPOLOGY, way too many nodes\n"))
1288 return -1;
1289
1290 ret = memory_node__read(&nodes[cnt++], idx);
1291 }
1292
1293 *cntp = cnt;
1294 closedir(dir);
1295
1296 if (!ret)
1297 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1298
1299 return ret;
1300}
1301
1302#define MAX_MEMORY_NODES 2000
1303
1304/*
1305 * The MEM_TOPOLOGY holds physical memory map for every
1306 * node in system. The format of data is as follows:
1307 *
1308 * 0 - version | for future changes
1309 * 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
1310 * 16 - count | number of nodes
1311 *
1312 * For each node we store map of physical indexes for
1313 * each node:
1314 *
1315 * 32 - node id | node index
1316 * 40 - size | size of bitmap
1317 * 48 - bitmap | bitmap of memory indexes that belongs to node
1318 */
1319static int write_mem_topology(struct feat_fd *ff __maybe_unused,
Jiri Olsa63503db2019-07-21 13:23:52 +02001320 struct evlist *evlist __maybe_unused)
Jiri Olsae2091ce2018-03-07 16:50:08 +01001321{
1322 static struct memory_node nodes[MAX_MEMORY_NODES];
1323 u64 bsize, version = 1, i, nr;
1324 int ret;
1325
1326 ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
1327 (unsigned long long *) &bsize);
1328 if (ret)
1329 return ret;
1330
1331 ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
1332 if (ret)
1333 return ret;
1334
1335 ret = do_write(ff, &version, sizeof(version));
1336 if (ret < 0)
1337 goto out;
1338
1339 ret = do_write(ff, &bsize, sizeof(bsize));
1340 if (ret < 0)
1341 goto out;
1342
1343 ret = do_write(ff, &nr, sizeof(nr));
1344 if (ret < 0)
1345 goto out;
1346
1347 for (i = 0; i < nr; i++) {
1348 struct memory_node *n = &nodes[i];
1349
1350 #define _W(v) \
1351 ret = do_write(ff, &n->v, sizeof(n->v)); \
1352 if (ret < 0) \
1353 goto out;
1354
1355 _W(node)
1356 _W(size)
1357
1358 #undef _W
1359
1360 ret = do_write_bitmap(ff, n->set, n->size);
1361 if (ret < 0)
1362 goto out;
1363 }
1364
1365out:
1366 return ret;
1367}
1368
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001369static int write_compressed(struct feat_fd *ff __maybe_unused,
Jiri Olsa63503db2019-07-21 13:23:52 +02001370 struct evlist *evlist __maybe_unused)
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001371{
1372 int ret;
1373
1374 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
1375 if (ret)
1376 return ret;
1377
1378 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
1379 if (ret)
1380 return ret;
1381
1382 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
1383 if (ret)
1384 return ret;
1385
1386 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
1387 if (ret)
1388 return ret;
1389
1390 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
1391}
1392
/* Print the recorded hostname from the header env. */
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}
1397
/* Print the recorded OS release string from the header env. */
static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}
1402
/* Print the recorded machine architecture from the header env. */
static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}
1407
/* Print the recorded CPU description from the header env. */
static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}
1412
/* Print online and available CPU counts from the header env. */
static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}
1418
/* Print the perf version that produced the recording. */
static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}
1423
/*
 * Print the recorded command line, one argument at a time, escaping any
 * single quotes within an argument as \' so the output can be re-used
 * on a shell command line.  If duplicating an argument fails, it is
 * printed unescaped as a fallback.
 */
static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
		if (!argv_i) {
			/* OOM: print the argument without escaping. */
			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
		} else {
			char *mem = argv_i;
			/* Split at each quote, printing "segment\'" pieces. */
			do {
				char *quote = strchr(argv_i, '\'');
				if (!quote)
					break;
				*quote++ = '\0';
				fprintf(fp, "%s\\\'", argv_i);
				argv_i = quote;
			} while (1);
			fprintf(fp, "%s ", argv_i);
			free(mem);
		}
	}
	fputc('\n', fp);
}
1452
/*
 * Print the CPU topology: sibling socket/die/thread lists (each stored
 * as consecutive NUL-terminated strings) and, when available, the
 * per-CPU core/die/socket ids.  Die information is printed only if the
 * recording contains it (nr_sibling_dies != 0).
 */
static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling sockets : %s\n", str);
		/* Advance past this NUL-terminated entry to the next one. */
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		nr = ph->env.nr_sibling_dies;
		str = ph->env.sibling_dies;

		for (i = 0; i < nr; i++) {
			fprintf(fp, "# sibling dies : %s\n", str);
			str += strlen(str) + 1;
		}
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Die ID %d, Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].die_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID, Die ID and Socket ID "
				    "information is not available\n");
	} else {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID and Socket ID "
				    "information is not available\n");
	}
}
1509
/*
 * Print the recorded clock information.
 * NOTE(review): clockid_res_ns is a resolution in nanoseconds, yet the
 * value printed is res * 1000 labeled "MHz" — this does not look like a
 * frequency computation; verify against the writer (write_clockid).
 */
static void print_clockid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
		ff->ph->env.clockid_res_ns * 1000);
}
1515
/* Print the directory-data version stored with the session. */
static void print_dir_format(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}
1526
/*
 * Print every BPF program info record from the env rb-tree, walked
 * under the bpf_progs read lock.
 */
static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);

		bpf_event__print_bpf_prog_info(&node->info_linear->info,
					       env, fp);
	}

	up_read(&env->bpf_progs.lock);
}
1550
/*
 * Print the id of every BTF object from the env rb-tree, walked under
 * the bpf_progs read lock.
 */
static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		fprintf(fp, "# btf info of id %u\n", node->id);
	}

	up_read(&env->bpf_progs.lock);
}
1572
Jiri Olsa32dcd022019-07-21 13:23:51 +02001573static void free_event_desc(struct evsel *events)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001574{
Jiri Olsa32dcd022019-07-21 13:23:51 +02001575 struct evsel *evsel;
Robert Richter4e1b9c62012-08-16 21:10:22 +02001576
1577 if (!events)
1578 return;
1579
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001580 for (evsel = events; evsel->core.attr.size; evsel++) {
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -03001581 zfree(&evsel->name);
1582 zfree(&evsel->id);
Robert Richter4e1b9c62012-08-16 21:10:22 +02001583 }
1584
1585 free(events);
1586}
1587
/*
 * Parse the EVENT_DESC feature section into a NULL-attr-terminated
 * array of evsels (event attr, name, and sample ids each).  Handles
 * cross-endian files via perf_event__attr_swap() and the needs_swap
 * flag.  Returns the array (caller frees with free_event_desc()) or
 * NULL on any read/allocation failure.
 */
static struct evsel *read_event_desc(struct feat_fd *ff)
{
	struct evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	/* number of events */
	if (do_read_u32(ff, &nre))
		goto error;

	/* on-file size of each attr record */
	if (do_read_u32(ff, &sz))
		goto error;

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->core.attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	/* copy at most our in-memory attr size from each record */
	msz = sizeof(evsel->core.attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->core.attr, buf, msz);

		/* number of sample ids following the attr */
		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	/* releases partial allocations; 'out' then frees the scratch buf */
	free_event_desc(events);
	events = NULL;
	goto out;
}
1665
/* Callback for perf_event_attr__fprintf(): print one ", name = val" pair. */
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}
1671
/*
 * Print every event description: name, sample ids, and the decoded
 * perf_event_attr.  Uses the pre-parsed ff->events when available
 * (pipe mode), otherwise parses the section itself; the array is freed
 * and ff->events cleared before returning.
 */
static void print_event_desc(struct feat_fd *ff, FILE *fp)
{
	struct evsel *evsel, *events;
	u32 j;
	u64 *id;

	if (ff->events)
		events = ff->events;
	else
		events = read_event_desc(ff);

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	/* Array is terminated by an entry with core.attr.size == 0. */
	for (evsel = events; evsel->core.attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
	ff->events = NULL;
}
1709
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001710static void print_total_mem(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001711{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001712 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001713}
1714
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001715static void print_numa_topology(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001716{
Jiri Olsac60da222016-07-04 14:16:20 +02001717 int i;
1718 struct numa_node *n;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001719
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001720 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1721 n = &ff->ph->env.numa_nodes[i];
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001722
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001723 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1724 " free = %"PRIu64" kB\n",
Jiri Olsac60da222016-07-04 14:16:20 +02001725 n->node, n->mem_total, n->mem_free);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001726
Jiri Olsac60da222016-07-04 14:16:20 +02001727 fprintf(fp, "# node%u cpu list : ", n->node);
1728 cpu_map__fprintf(n->map, fp);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001729 }
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001730}
1731
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001732static void print_cpuid(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001733{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001734 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001735}
1736
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001737static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
Stephane Eranian330aa672012-03-08 23:47:46 +01001738{
1739 fprintf(fp, "# contains samples with branch stack\n");
1740}
1741
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001742static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
Adrian Hunter4025ea42015-04-09 18:53:41 +03001743{
1744 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1745}
1746
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001747static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
Jiri Olsaffa517a2015-10-25 15:51:43 +01001748{
1749 fprintf(fp, "# contains stat data\n");
1750}
1751
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001752static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
Jiri Olsa720e98b2016-02-16 16:01:43 +01001753{
1754 int i;
1755
1756 fprintf(fp, "# CPU cache info:\n");
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001757 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
Jiri Olsa720e98b2016-02-16 16:01:43 +01001758 fprintf(fp, "# ");
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001759 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
Jiri Olsa720e98b2016-02-16 16:01:43 +01001760 }
1761}
1762
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001763static void print_compressed(struct feat_fd *ff, FILE *fp)
1764{
1765 fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
1766 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
1767 ff->ph->env.comp_level, ff->ph->env.comp_ratio);
1768}
1769
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001770static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
Robert Richter50a96672012-08-16 21:10:24 +02001771{
1772 const char *delimiter = "# pmu mappings: ";
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001773 char *str, *tmp;
Robert Richter50a96672012-08-16 21:10:24 +02001774 u32 pmu_num;
1775 u32 type;
1776
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001777 pmu_num = ff->ph->env.nr_pmu_mappings;
Robert Richter50a96672012-08-16 21:10:24 +02001778 if (!pmu_num) {
1779 fprintf(fp, "# pmu mappings: not available\n");
1780 return;
1781 }
1782
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001783 str = ff->ph->env.pmu_mappings;
Namhyung Kimbe4a2de2012-09-05 14:02:49 +09001784
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001785 while (pmu_num) {
1786 type = strtoul(str, &tmp, 0);
1787 if (*tmp != ':')
1788 goto error;
1789
1790 str = tmp + 1;
1791 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1792
Robert Richter50a96672012-08-16 21:10:24 +02001793 delimiter = ", ";
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001794 str += strlen(str) + 1;
1795 pmu_num--;
Robert Richter50a96672012-08-16 21:10:24 +02001796 }
1797
1798 fprintf(fp, "\n");
1799
1800 if (!pmu_num)
1801 return;
1802error:
1803 fprintf(fp, "# pmu mappings: unable to read\n");
1804}
1805
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001806static void print_group_desc(struct feat_fd *ff, FILE *fp)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001807{
1808 struct perf_session *session;
Jiri Olsa32dcd022019-07-21 13:23:51 +02001809 struct evsel *evsel;
Namhyung Kima8bb5592013-01-22 18:09:31 +09001810 u32 nr = 0;
1811
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001812 session = container_of(ff->ph, struct perf_session, header);
Namhyung Kima8bb5592013-01-22 18:09:31 +09001813
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001814 evlist__for_each_entry(session->evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09001815 if (perf_evsel__is_group_leader(evsel) &&
Jiri Olsa5643b1a2019-07-21 13:24:46 +02001816 evsel->core.nr_members > 1) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09001817 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1818 perf_evsel__name(evsel));
1819
Jiri Olsa5643b1a2019-07-21 13:24:46 +02001820 nr = evsel->core.nr_members - 1;
Namhyung Kima8bb5592013-01-22 18:09:31 +09001821 } else if (nr) {
1822 fprintf(fp, ",%s", perf_evsel__name(evsel));
1823
1824 if (--nr == 0)
1825 fprintf(fp, "}\n");
1826 }
1827 }
1828}
1829
Jin Yao60115182017-12-08 21:13:41 +08001830static void print_sample_time(struct feat_fd *ff, FILE *fp)
1831{
1832 struct perf_session *session;
1833 char time_buf[32];
1834 double d;
1835
1836 session = container_of(ff->ph, struct perf_session, header);
1837
1838 timestamp__scnprintf_usec(session->evlist->first_sample_time,
1839 time_buf, sizeof(time_buf));
1840 fprintf(fp, "# time of first sample : %s\n", time_buf);
1841
1842 timestamp__scnprintf_usec(session->evlist->last_sample_time,
1843 time_buf, sizeof(time_buf));
1844 fprintf(fp, "# time of last sample : %s\n", time_buf);
1845
1846 d = (double)(session->evlist->last_sample_time -
1847 session->evlist->first_sample_time) / NSEC_PER_MSEC;
1848
1849 fprintf(fp, "# sample duration : %10.3f ms\n", d);
1850}
1851
Jiri Olsae2091ce2018-03-07 16:50:08 +01001852static void memory_node__fprintf(struct memory_node *n,
1853 unsigned long long bsize, FILE *fp)
1854{
1855 char buf_map[100], buf_size[50];
1856 unsigned long long size;
1857
1858 size = bsize * bitmap_weight(n->set, n->size);
1859 unit_number__scnprintf(buf_size, 50, size);
1860
1861 bitmap_scnprintf(n->set, n->size, buf_map, 100);
1862 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
1863}
1864
1865static void print_mem_topology(struct feat_fd *ff, FILE *fp)
1866{
1867 struct memory_node *nodes;
1868 int i, nr;
1869
1870 nodes = ff->ph->env.memory_nodes;
1871 nr = ff->ph->env.nr_memory_nodes;
1872
1873 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
1874 nr, ff->ph->env.memory_bsize);
1875
1876 for (i = 0; i < nr; i++) {
1877 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
1878 }
1879}
1880
Jiri Olsa72932372019-08-28 15:57:16 +02001881static int __event_process_build_id(struct perf_record_header_build_id *bev,
Robert Richter08d95bd2012-02-10 15:41:55 +01001882 char *filename,
1883 struct perf_session *session)
1884{
1885 int err = -1;
Robert Richter08d95bd2012-02-10 15:41:55 +01001886 struct machine *machine;
Wang Nan1f121b02015-06-03 08:52:21 +00001887 u16 cpumode;
Robert Richter08d95bd2012-02-10 15:41:55 +01001888 struct dso *dso;
1889 enum dso_kernel_type dso_type;
1890
1891 machine = perf_session__findnew_machine(session, bev->pid);
1892 if (!machine)
1893 goto out;
1894
Wang Nan1f121b02015-06-03 08:52:21 +00001895 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
Robert Richter08d95bd2012-02-10 15:41:55 +01001896
Wang Nan1f121b02015-06-03 08:52:21 +00001897 switch (cpumode) {
Robert Richter08d95bd2012-02-10 15:41:55 +01001898 case PERF_RECORD_MISC_KERNEL:
1899 dso_type = DSO_TYPE_KERNEL;
Robert Richter08d95bd2012-02-10 15:41:55 +01001900 break;
1901 case PERF_RECORD_MISC_GUEST_KERNEL:
1902 dso_type = DSO_TYPE_GUEST_KERNEL;
Robert Richter08d95bd2012-02-10 15:41:55 +01001903 break;
1904 case PERF_RECORD_MISC_USER:
1905 case PERF_RECORD_MISC_GUEST_USER:
1906 dso_type = DSO_TYPE_USER;
Robert Richter08d95bd2012-02-10 15:41:55 +01001907 break;
1908 default:
1909 goto out;
1910 }
1911
Arnaldo Carvalho de Meloaa7cc2a2015-05-29 11:31:12 -03001912 dso = machine__findnew_dso(machine, filename);
Robert Richter08d95bd2012-02-10 15:41:55 +01001913 if (dso != NULL) {
Masami Hiramatsub5d8bbe2016-05-11 22:51:59 +09001914 char sbuild_id[SBUILD_ID_SIZE];
Robert Richter08d95bd2012-02-10 15:41:55 +01001915
1916 dso__set_build_id(dso, &bev->build_id);
1917
Namhyung Kim1deec1b2017-05-31 21:01:03 +09001918 if (dso_type != DSO_TYPE_USER) {
1919 struct kmod_path m = { .name = NULL, };
1920
1921 if (!kmod_path__parse_name(&m, filename) && m.kmod)
Namhyung Kim6b335e82017-05-31 21:01:04 +09001922 dso__set_module_info(dso, &m, machine);
Namhyung Kim1deec1b2017-05-31 21:01:03 +09001923 else
1924 dso->kernel = dso_type;
1925
1926 free(m.name);
1927 }
Robert Richter08d95bd2012-02-10 15:41:55 +01001928
1929 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1930 sbuild_id);
1931 pr_debug("build id event received for %s: %s\n",
1932 dso->long_name, sbuild_id);
Arnaldo Carvalho de Melod3a7c482015-06-02 11:53:26 -03001933 dso__put(dso);
Robert Richter08d95bd2012-02-10 15:41:55 +01001934 }
1935
1936 err = 0;
1937out:
1938 return err;
1939}
1940
/*
 * Read a build id table written by perf versions that predate the pid
 * field in perf_record_header_build_id (see the a1645ce1 quirk comment in
 * perf_header__read_build_ids()).  Each old-format record is converted to
 * the current layout, with the pid synthesized from header.misc, and then
 * processed normally.  Returns 0 on success, -1 on short read.
 */
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header header;
		u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char filename[0];
	} old_bev;
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		/* The filename immediately follows the fixed-size record. */
		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value give us nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}
1986
/*
 * Read the HEADER_BUILD_ID table occupying [offset, offset + size) of
 * @input: a sequence of perf_record_header_build_id records, each
 * followed by the dso filename.  Every entry's build id is installed into
 * the session's dso tree.
 *
 * Files written before the a1645ce1 kernel changeset lack the pid field;
 * that layout is detected via the mangled '[kernel.kallsyms]' name (see
 * comment below) and the whole table is re-parsed with the ABI quirk
 * reader.  Returns 0 on success, -1 on failure.
 */
static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct perf_record_header_build_id that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
2035
David Carrillo-Cisnerosdfaa1582017-07-17 21:25:35 -07002036/* Macro for features that simply need to read and store a string. */
2037#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002038static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
David Carrillo-Cisnerosdfaa1582017-07-17 21:25:35 -07002039{\
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002040 ff->ph->env.__feat_env = do_read_string(ff); \
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002041 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
David Carrillo-Cisnerosdfaa1582017-07-17 21:25:35 -07002042}
2043
2044FEAT_PROCESS_STR_FUN(hostname, hostname);
2045FEAT_PROCESS_STR_FUN(osrelease, os_release);
2046FEAT_PROCESS_STR_FUN(version, version);
2047FEAT_PROCESS_STR_FUN(arch, arch);
2048FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
2049FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2050
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002051static int process_tracing_data(struct feat_fd *ff, void *data)
Robert Richterf1c67db2012-02-10 15:41:56 +01002052{
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002053 ssize_t ret = trace_report(ff->fd, data, false);
2054
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09002055 return ret < 0 ? -1 : 0;
Robert Richterf1c67db2012-02-10 15:41:56 +01002056}
2057
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002058static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
Robert Richterf1c67db2012-02-10 15:41:56 +01002059{
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002060 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
Robert Richterf1c67db2012-02-10 15:41:56 +01002061 pr_debug("Failed to read buildids, continuing...\n");
2062 return 0;
2063}
2064
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002065static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002066{
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002067 int ret;
2068 u32 nr_cpus_avail, nr_cpus_online;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002069
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002070 ret = do_read_u32(ff, &nr_cpus_avail);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002071 if (ret)
2072 return ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002073
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002074 ret = do_read_u32(ff, &nr_cpus_online);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002075 if (ret)
2076 return ret;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002077 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2078 ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002079 return 0;
2080}
2081
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002082static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002083{
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002084 u64 total_mem;
2085 int ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002086
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002087 ret = do_read_u64(ff, &total_mem);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002088 if (ret)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002089 return -1;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002090 ff->ph->env.total_mem = (unsigned long long)total_mem;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002091 return 0;
2092}
2093
Jiri Olsa32dcd022019-07-21 13:23:51 +02002094static struct evsel *
Jiri Olsa63503db2019-07-21 13:23:52 +02002095perf_evlist__find_by_index(struct evlist *evlist, int idx)
Robert Richter7c2f7af2012-08-16 21:10:23 +02002096{
Jiri Olsa32dcd022019-07-21 13:23:51 +02002097 struct evsel *evsel;
Robert Richter7c2f7af2012-08-16 21:10:23 +02002098
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002099 evlist__for_each_entry(evlist, evsel) {
Robert Richter7c2f7af2012-08-16 21:10:23 +02002100 if (evsel->idx == idx)
2101 return evsel;
2102 }
2103
2104 return NULL;
2105}
2106
2107static void
Jiri Olsa63503db2019-07-21 13:23:52 +02002108perf_evlist__set_event_name(struct evlist *evlist,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002109 struct evsel *event)
Robert Richter7c2f7af2012-08-16 21:10:23 +02002110{
Jiri Olsa32dcd022019-07-21 13:23:51 +02002111 struct evsel *evsel;
Robert Richter7c2f7af2012-08-16 21:10:23 +02002112
2113 if (!event->name)
2114 return;
2115
2116 evsel = perf_evlist__find_by_index(evlist, event->idx);
2117 if (!evsel)
2118 return;
2119
2120 if (evsel->name)
2121 return;
2122
2123 evsel->name = strdup(event->name);
2124}
2125
/*
 * EVENT_DESC feature: read the event descriptions and use them to name
 * the evsels of the current session.
 *
 * In pipe mode the parsed array is kept on ff->events so that
 * print_event_desc() can consume it later (the stream cannot be
 * re-read); otherwise the array is freed before returning.
 */
static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	struct evsel *evsel, *events = read_event_desc(ff);

	if (!events)
		return 0;

	session = container_of(ff->ph, struct perf_session, header);

	if (session->data->is_pipe) {
		/* Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode. */
		ff->events = events;
	}

	for (evsel = events; evsel->core.attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	if (!session->data->is_pipe)
		free_event_desc(events);

	return 0;
}
2151
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002152static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002153{
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002154 char *str, *cmdline = NULL, **argv = NULL;
2155 u32 nr, i, len = 0;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002156
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002157 if (do_read_u32(ff, &nr))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002158 return -1;
2159
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002160 ff->ph->env.nr_cmdline = nr;
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002161
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002162 cmdline = zalloc(ff->size + nr + 1);
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002163 if (!cmdline)
2164 return -1;
2165
2166 argv = zalloc(sizeof(char *) * (nr + 1));
2167 if (!argv)
2168 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002169
2170 for (i = 0; i < nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002171 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002172 if (!str)
2173 goto error;
2174
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002175 argv[i] = cmdline + len;
2176 memcpy(argv[i], str, strlen(str) + 1);
2177 len += strlen(str) + 1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002178 free(str);
2179 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002180 ff->ph->env.cmdline = cmdline;
2181 ff->ph->env.cmdline_argv = (const char **) argv;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002182 return 0;
2183
2184error:
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002185 free(argv);
2186 free(cmdline);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002187 return -1;
2188}
2189
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002190static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002191{
Namhyung Kima1ae5652012-09-24 17:14:59 +09002192 u32 nr, i;
2193 char *str;
2194 struct strbuf sb;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002195 int cpu_nr = ff->ph->env.nr_cpus_avail;
Kan Liang2bb00d22015-09-01 09:58:12 -04002196 u64 size = 0;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002197 struct perf_header *ph = ff->ph;
Thomas Richter01766222018-06-11 09:31:52 +02002198 bool do_core_id_test = true;
Kan Liang2bb00d22015-09-01 09:58:12 -04002199
2200 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
2201 if (!ph->env.cpu)
2202 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002203
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002204 if (do_read_u32(ff, &nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04002205 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002206
Namhyung Kima1ae5652012-09-24 17:14:59 +09002207 ph->env.nr_sibling_cores = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04002208 size += sizeof(u32);
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002209 if (strbuf_init(&sb, 128) < 0)
2210 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002211
2212 for (i = 0; i < nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002213 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002214 if (!str)
2215 goto error;
2216
2217 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002218 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2219 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04002220 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002221 free(str);
2222 }
2223 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
2224
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002225 if (do_read_u32(ff, &nr))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002226 return -1;
2227
Namhyung Kima1ae5652012-09-24 17:14:59 +09002228 ph->env.nr_sibling_threads = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04002229 size += sizeof(u32);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002230
2231 for (i = 0; i < nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002232 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002233 if (!str)
2234 goto error;
2235
2236 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002237 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2238 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04002239 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002240 free(str);
2241 }
2242 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
Kan Liang2bb00d22015-09-01 09:58:12 -04002243
2244 /*
2245 * The header may be from old perf,
2246 * which doesn't include core id and socket id information.
2247 */
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002248 if (ff->size <= size) {
Kan Liang2bb00d22015-09-01 09:58:12 -04002249 zfree(&ph->env.cpu);
2250 return 0;
2251 }
2252
Thomas Richter01766222018-06-11 09:31:52 +02002253 /* On s390 the socket_id number is not related to the numbers of cpus.
2254 * The socket_id number might be higher than the numbers of cpus.
2255 * This depends on the configuration.
Tan Xiaojun0a4d8fb2019-08-02 11:48:57 +08002256 * AArch64 is the same.
Thomas Richter01766222018-06-11 09:31:52 +02002257 */
Tan Xiaojun0a4d8fb2019-08-02 11:48:57 +08002258 if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
2259 || !strncmp(ph->env.arch, "aarch64", 7)))
Thomas Richter01766222018-06-11 09:31:52 +02002260 do_core_id_test = false;
2261
Kan Liang2bb00d22015-09-01 09:58:12 -04002262 for (i = 0; i < (u32)cpu_nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002263 if (do_read_u32(ff, &nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04002264 goto free_cpu;
2265
Kan Liang2bb00d22015-09-01 09:58:12 -04002266 ph->env.cpu[i].core_id = nr;
Kan Liangacae8b32019-06-04 15:50:41 -07002267 size += sizeof(u32);
Kan Liang2bb00d22015-09-01 09:58:12 -04002268
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002269 if (do_read_u32(ff, &nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04002270 goto free_cpu;
2271
Thomas Richter01766222018-06-11 09:31:52 +02002272 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
Kan Liang2bb00d22015-09-01 09:58:12 -04002273 pr_debug("socket_id number is too big."
2274 "You may need to upgrade the perf tool.\n");
2275 goto free_cpu;
2276 }
2277
2278 ph->env.cpu[i].socket_id = nr;
Kan Liangacae8b32019-06-04 15:50:41 -07002279 size += sizeof(u32);
2280 }
2281
2282 /*
2283 * The header may be from old perf,
2284 * which doesn't include die information.
2285 */
2286 if (ff->size <= size)
2287 return 0;
2288
2289 if (do_read_u32(ff, &nr))
2290 return -1;
2291
2292 ph->env.nr_sibling_dies = nr;
2293 size += sizeof(u32);
2294
2295 for (i = 0; i < nr; i++) {
2296 str = do_read_string(ff);
2297 if (!str)
2298 goto error;
2299
2300 /* include a NULL character at the end */
2301 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2302 goto error;
2303 size += string_size(str);
2304 free(str);
2305 }
2306 ph->env.sibling_dies = strbuf_detach(&sb, NULL);
2307
2308 for (i = 0; i < (u32)cpu_nr; i++) {
2309 if (do_read_u32(ff, &nr))
2310 goto free_cpu;
2311
2312 ph->env.cpu[i].die_id = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04002313 }
2314
Namhyung Kima1ae5652012-09-24 17:14:59 +09002315 return 0;
2316
2317error:
2318 strbuf_release(&sb);
Kan Liang2bb00d22015-09-01 09:58:12 -04002319free_cpu:
2320 zfree(&ph->env.cpu);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002321 return -1;
2322}
2323
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002324static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002325{
Jiri Olsac60da222016-07-04 14:16:20 +02002326 struct numa_node *nodes, *n;
Jiri Olsac60da222016-07-04 14:16:20 +02002327 u32 nr, i;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002328 char *str;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002329
2330 /* nr nodes */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002331 if (do_read_u32(ff, &nr))
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002332 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002333
Jiri Olsac60da222016-07-04 14:16:20 +02002334 nodes = zalloc(sizeof(*nodes) * nr);
2335 if (!nodes)
2336 return -ENOMEM;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002337
2338 for (i = 0; i < nr; i++) {
Jiri Olsac60da222016-07-04 14:16:20 +02002339 n = &nodes[i];
2340
Namhyung Kima1ae5652012-09-24 17:14:59 +09002341 /* node number */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002342 if (do_read_u32(ff, &n->node))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002343 goto error;
2344
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002345 if (do_read_u64(ff, &n->mem_total))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002346 goto error;
2347
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002348 if (do_read_u64(ff, &n->mem_free))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002349 goto error;
2350
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002351 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002352 if (!str)
2353 goto error;
2354
Jiri Olsa9c3516d2019-07-21 13:24:30 +02002355 n->map = perf_cpu_map__new(str);
Jiri Olsac60da222016-07-04 14:16:20 +02002356 if (!n->map)
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002357 goto error;
Jiri Olsac60da222016-07-04 14:16:20 +02002358
Namhyung Kima1ae5652012-09-24 17:14:59 +09002359 free(str);
2360 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002361 ff->ph->env.nr_numa_nodes = nr;
2362 ff->ph->env.numa_nodes = nodes;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002363 return 0;
2364
2365error:
Jiri Olsac60da222016-07-04 14:16:20 +02002366 free(nodes);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002367 return -1;
2368}
2369
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002370static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002371{
Namhyung Kima1ae5652012-09-24 17:14:59 +09002372 char *name;
2373 u32 pmu_num;
2374 u32 type;
2375 struct strbuf sb;
2376
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002377 if (do_read_u32(ff, &pmu_num))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002378 return -1;
2379
Namhyung Kima1ae5652012-09-24 17:14:59 +09002380 if (!pmu_num) {
2381 pr_debug("pmu mappings not available\n");
2382 return 0;
2383 }
2384
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002385 ff->ph->env.nr_pmu_mappings = pmu_num;
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002386 if (strbuf_init(&sb, 128) < 0)
2387 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002388
2389 while (pmu_num) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002390 if (do_read_u32(ff, &type))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002391 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002392
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002393 name = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002394 if (!name)
2395 goto error;
2396
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002397 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2398 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002399 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002400 if (strbuf_add(&sb, "", 1) < 0)
2401 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002402
Kan Liange0838e02015-09-10 11:03:05 -03002403 if (!strcmp(name, "msr"))
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002404 ff->ph->env.msr_pmu_type = type;
Kan Liange0838e02015-09-10 11:03:05 -03002405
Namhyung Kima1ae5652012-09-24 17:14:59 +09002406 free(name);
2407 pmu_num--;
2408 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002409 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002410 return 0;
2411
2412error:
2413 strbuf_release(&sb);
2414 return -1;
2415}
2416
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002417static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima8bb5592013-01-22 18:09:31 +09002418{
2419 size_t ret = -1;
2420 u32 i, nr, nr_groups;
2421 struct perf_session *session;
Jiri Olsa32dcd022019-07-21 13:23:51 +02002422 struct evsel *evsel, *leader = NULL;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002423 struct group_desc {
2424 char *name;
2425 u32 leader_idx;
2426 u32 nr_members;
2427 } *desc;
2428
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002429 if (do_read_u32(ff, &nr_groups))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002430 return -1;
2431
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002432 ff->ph->env.nr_groups = nr_groups;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002433 if (!nr_groups) {
2434 pr_debug("group desc not available\n");
2435 return 0;
2436 }
2437
2438 desc = calloc(nr_groups, sizeof(*desc));
2439 if (!desc)
2440 return -1;
2441
2442 for (i = 0; i < nr_groups; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002443 desc[i].name = do_read_string(ff);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002444 if (!desc[i].name)
2445 goto out_free;
2446
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002447 if (do_read_u32(ff, &desc[i].leader_idx))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002448 goto out_free;
2449
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002450 if (do_read_u32(ff, &desc[i].nr_members))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002451 goto out_free;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002452 }
2453
2454 /*
2455 * Rebuild group relationship based on the group_desc
2456 */
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002457 session = container_of(ff->ph, struct perf_session, header);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002458 session->evlist->nr_groups = nr_groups;
2459
2460 i = nr = 0;
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002461 evlist__for_each_entry(session->evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002462 if (evsel->idx == (int) desc[i].leader_idx) {
2463 evsel->leader = evsel;
2464 /* {anon_group} is a dummy name */
Namhyung Kim210e8122013-11-18 11:20:43 +09002465 if (strcmp(desc[i].name, "{anon_group}")) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002466 evsel->group_name = desc[i].name;
Namhyung Kim210e8122013-11-18 11:20:43 +09002467 desc[i].name = NULL;
2468 }
Jiri Olsa5643b1a2019-07-21 13:24:46 +02002469 evsel->core.nr_members = desc[i].nr_members;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002470
2471 if (i >= nr_groups || nr > 0) {
2472 pr_debug("invalid group desc\n");
2473 goto out_free;
2474 }
2475
2476 leader = evsel;
Jiri Olsa5643b1a2019-07-21 13:24:46 +02002477 nr = evsel->core.nr_members - 1;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002478 i++;
2479 } else if (nr) {
2480 /* This is a group member */
2481 evsel->leader = leader;
2482
2483 nr--;
2484 }
2485 }
2486
2487 if (i != nr_groups || nr != 0) {
2488 pr_debug("invalid group desc\n");
2489 goto out_free;
2490 }
2491
2492 ret = 0;
2493out_free:
Namhyung Kim50a27402013-11-18 11:20:44 +09002494 for (i = 0; i < nr_groups; i++)
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -03002495 zfree(&desc[i].name);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002496 free(desc);
2497
2498 return ret;
2499}
2500
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002501static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
Adrian Hunter99fa2982015-04-30 17:37:25 +03002502{
2503 struct perf_session *session;
2504 int err;
2505
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002506 session = container_of(ff->ph, struct perf_session, header);
Adrian Hunter99fa2982015-04-30 17:37:25 +03002507
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002508 err = auxtrace_index__process(ff->fd, ff->size, session,
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002509 ff->ph->needs_swap);
Adrian Hunter99fa2982015-04-30 17:37:25 +03002510 if (err < 0)
2511 pr_err("Failed to process auxtrace index\n");
2512 return err;
2513}
2514
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002515static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
Jiri Olsa720e98b2016-02-16 16:01:43 +01002516{
2517 struct cpu_cache_level *caches;
2518 u32 cnt, i, version;
2519
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002520 if (do_read_u32(ff, &version))
Jiri Olsa720e98b2016-02-16 16:01:43 +01002521 return -1;
2522
Jiri Olsa720e98b2016-02-16 16:01:43 +01002523 if (version != 1)
2524 return -1;
2525
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002526 if (do_read_u32(ff, &cnt))
Jiri Olsa720e98b2016-02-16 16:01:43 +01002527 return -1;
2528
Jiri Olsa720e98b2016-02-16 16:01:43 +01002529 caches = zalloc(sizeof(*caches) * cnt);
2530 if (!caches)
2531 return -1;
2532
2533 for (i = 0; i < cnt; i++) {
2534 struct cpu_cache_level c;
2535
2536 #define _R(v) \
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002537 if (do_read_u32(ff, &c.v))\
Jiri Olsa720e98b2016-02-16 16:01:43 +01002538 goto out_free_caches; \
Jiri Olsa720e98b2016-02-16 16:01:43 +01002539
2540 _R(level)
2541 _R(line_size)
2542 _R(sets)
2543 _R(ways)
2544 #undef _R
2545
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002546 #define _R(v) \
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002547 c.v = do_read_string(ff); \
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002548 if (!c.v) \
Jiri Olsa720e98b2016-02-16 16:01:43 +01002549 goto out_free_caches;
2550
2551 _R(type)
2552 _R(size)
2553 _R(map)
2554 #undef _R
2555
2556 caches[i] = c;
2557 }
2558
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002559 ff->ph->env.caches = caches;
2560 ff->ph->env.caches_cnt = cnt;
Jiri Olsa720e98b2016-02-16 16:01:43 +01002561 return 0;
2562out_free_caches:
2563 free(caches);
2564 return -1;
2565}
2566
Jin Yao60115182017-12-08 21:13:41 +08002567static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2568{
2569 struct perf_session *session;
2570 u64 first_sample_time, last_sample_time;
2571 int ret;
2572
2573 session = container_of(ff->ph, struct perf_session, header);
2574
2575 ret = do_read_u64(ff, &first_sample_time);
2576 if (ret)
2577 return -1;
2578
2579 ret = do_read_u64(ff, &last_sample_time);
2580 if (ret)
2581 return -1;
2582
2583 session->evlist->first_sample_time = first_sample_time;
2584 session->evlist->last_sample_time = last_sample_time;
2585 return 0;
2586}
2587
Jiri Olsae2091ce2018-03-07 16:50:08 +01002588static int process_mem_topology(struct feat_fd *ff,
2589 void *data __maybe_unused)
2590{
2591 struct memory_node *nodes;
2592 u64 version, i, nr, bsize;
2593 int ret = -1;
2594
2595 if (do_read_u64(ff, &version))
2596 return -1;
2597
2598 if (version != 1)
2599 return -1;
2600
2601 if (do_read_u64(ff, &bsize))
2602 return -1;
2603
2604 if (do_read_u64(ff, &nr))
2605 return -1;
2606
2607 nodes = zalloc(sizeof(*nodes) * nr);
2608 if (!nodes)
2609 return -1;
2610
2611 for (i = 0; i < nr; i++) {
2612 struct memory_node n;
2613
2614 #define _R(v) \
2615 if (do_read_u64(ff, &n.v)) \
2616 goto out; \
2617
2618 _R(node)
2619 _R(size)
2620
2621 #undef _R
2622
2623 if (do_read_bitmap(ff, &n.set, &n.size))
2624 goto out;
2625
2626 nodes[i] = n;
2627 }
2628
2629 ff->ph->env.memory_bsize = bsize;
2630 ff->ph->env.memory_nodes = nodes;
2631 ff->ph->env.nr_memory_nodes = nr;
2632 ret = 0;
2633
2634out:
2635 if (ret)
2636 free(nodes);
2637 return ret;
2638}
2639
Alexey Budankovcf790512018-10-09 17:36:24 +03002640static int process_clockid(struct feat_fd *ff,
2641 void *data __maybe_unused)
2642{
2643 if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
2644 return -1;
2645
2646 return 0;
2647}
2648
Jiri Olsa258031c2019-03-08 14:47:39 +01002649static int process_dir_format(struct feat_fd *ff,
2650 void *_data __maybe_unused)
2651{
2652 struct perf_session *session;
2653 struct perf_data *data;
2654
2655 session = container_of(ff->ph, struct perf_session, header);
2656 data = session->data;
2657
2658 if (WARN_ON(!perf_data__is_dir(data)))
2659 return -1;
2660
2661 return do_read_u64(ff, &data->dir.version);
2662}
2663
Song Liu606f9722019-03-11 22:30:43 -07002664#ifdef HAVE_LIBBPF_SUPPORT
2665static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
2666{
2667 struct bpf_prog_info_linear *info_linear;
2668 struct bpf_prog_info_node *info_node;
2669 struct perf_env *env = &ff->ph->env;
2670 u32 count, i;
2671 int err = -1;
2672
2673 if (ff->ph->needs_swap) {
2674 pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
2675 return 0;
2676 }
2677
2678 if (do_read_u32(ff, &count))
2679 return -1;
2680
2681 down_write(&env->bpf_progs.lock);
2682
2683 for (i = 0; i < count; ++i) {
2684 u32 info_len, data_len;
2685
2686 info_linear = NULL;
2687 info_node = NULL;
2688 if (do_read_u32(ff, &info_len))
2689 goto out;
2690 if (do_read_u32(ff, &data_len))
2691 goto out;
2692
2693 if (info_len > sizeof(struct bpf_prog_info)) {
2694 pr_warning("detected invalid bpf_prog_info\n");
2695 goto out;
2696 }
2697
2698 info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
2699 data_len);
2700 if (!info_linear)
2701 goto out;
2702 info_linear->info_len = sizeof(struct bpf_prog_info);
2703 info_linear->data_len = data_len;
2704 if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
2705 goto out;
2706 if (__do_read(ff, &info_linear->info, info_len))
2707 goto out;
2708 if (info_len < sizeof(struct bpf_prog_info))
2709 memset(((void *)(&info_linear->info)) + info_len, 0,
2710 sizeof(struct bpf_prog_info) - info_len);
2711
2712 if (__do_read(ff, info_linear->data, data_len))
2713 goto out;
2714
2715 info_node = malloc(sizeof(struct bpf_prog_info_node));
2716 if (!info_node)
2717 goto out;
2718
2719 /* after reading from file, translate offset to address */
2720 bpf_program__bpil_offs_to_addr(info_linear);
2721 info_node->info_linear = info_linear;
2722 perf_env__insert_bpf_prog_info(env, info_node);
2723 }
2724
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002725 up_write(&env->bpf_progs.lock);
Song Liu606f9722019-03-11 22:30:43 -07002726 return 0;
2727out:
2728 free(info_linear);
2729 free(info_node);
2730 up_write(&env->bpf_progs.lock);
2731 return err;
2732}
2733#else // HAVE_LIBBPF_SUPPORT
2734static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
2735{
2736 return 0;
2737}
2738#endif // HAVE_LIBBPF_SUPPORT
2739
Song Liua70a112312019-03-11 22:30:45 -07002740static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
2741{
2742 struct perf_env *env = &ff->ph->env;
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002743 struct btf_node *node = NULL;
Song Liua70a112312019-03-11 22:30:45 -07002744 u32 count, i;
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002745 int err = -1;
Song Liua70a112312019-03-11 22:30:45 -07002746
2747 if (ff->ph->needs_swap) {
2748 pr_warning("interpreting btf from systems with endianity is not yet supported\n");
2749 return 0;
2750 }
2751
2752 if (do_read_u32(ff, &count))
2753 return -1;
2754
2755 down_write(&env->bpf_progs.lock);
2756
2757 for (i = 0; i < count; ++i) {
Song Liua70a112312019-03-11 22:30:45 -07002758 u32 id, data_size;
2759
2760 if (do_read_u32(ff, &id))
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002761 goto out;
Song Liua70a112312019-03-11 22:30:45 -07002762 if (do_read_u32(ff, &data_size))
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002763 goto out;
Song Liua70a112312019-03-11 22:30:45 -07002764
2765 node = malloc(sizeof(struct btf_node) + data_size);
2766 if (!node)
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002767 goto out;
Song Liua70a112312019-03-11 22:30:45 -07002768
2769 node->id = id;
2770 node->data_size = data_size;
2771
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002772 if (__do_read(ff, node->data, data_size))
2773 goto out;
Song Liua70a112312019-03-11 22:30:45 -07002774
2775 perf_env__insert_btf(env, node);
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002776 node = NULL;
Song Liua70a112312019-03-11 22:30:45 -07002777 }
2778
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002779 err = 0;
2780out:
Song Liua70a112312019-03-11 22:30:45 -07002781 up_write(&env->bpf_progs.lock);
Gustavo A. R. Silva14c9b312019-04-08 12:33:55 -05002782 free(node);
2783 return err;
Song Liua70a112312019-03-11 22:30:45 -07002784}
2785
Alexey Budankov42e1fd82019-03-18 20:41:33 +03002786static int process_compressed(struct feat_fd *ff,
2787 void *data __maybe_unused)
2788{
2789 if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
2790 return -1;
2791
2792 if (do_read_u32(ff, &(ff->ph->env.comp_type)))
2793 return -1;
2794
2795 if (do_read_u32(ff, &(ff->ph->env.comp_level)))
2796 return -1;
2797
2798 if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
2799 return -1;
2800
2801 if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
2802 return -1;
2803
2804 return 0;
2805}
2806
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002807struct feature_ops {
Jiri Olsa63503db2019-07-21 13:23:52 +02002808 int (*write)(struct feat_fd *ff, struct evlist *evlist);
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07002809 void (*print)(struct feat_fd *ff, FILE *fp);
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002810 int (*process)(struct feat_fd *ff, void *data);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002811 const char *name;
2812 bool full_only;
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -07002813 bool synthesize;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002814};
2815
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -07002816#define FEAT_OPR(n, func, __full_only) \
2817 [HEADER_##n] = { \
2818 .name = __stringify(n), \
2819 .write = write_##func, \
2820 .print = print_##func, \
2821 .full_only = __full_only, \
2822 .process = process_##func, \
2823 .synthesize = true \
2824 }
2825
2826#define FEAT_OPN(n, func, __full_only) \
2827 [HEADER_##n] = { \
2828 .name = __stringify(n), \
2829 .write = write_##func, \
2830 .print = print_##func, \
2831 .full_only = __full_only, \
2832 .process = process_##func \
2833 }
Robert Richter8cdfa782011-12-07 10:02:56 +01002834
2835/* feature_ops not implemented: */
Stephane Eranian2eeaaa02012-05-15 13:28:13 +02002836#define print_tracing_data NULL
2837#define print_build_id NULL
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002838
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -07002839#define process_branch_stack NULL
2840#define process_stat NULL
2841
2842
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002843static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -07002844 FEAT_OPN(TRACING_DATA, tracing_data, false),
2845 FEAT_OPN(BUILD_ID, build_id, false),
2846 FEAT_OPR(HOSTNAME, hostname, false),
2847 FEAT_OPR(OSRELEASE, osrelease, false),
2848 FEAT_OPR(VERSION, version, false),
2849 FEAT_OPR(ARCH, arch, false),
2850 FEAT_OPR(NRCPUS, nrcpus, false),
2851 FEAT_OPR(CPUDESC, cpudesc, false),
2852 FEAT_OPR(CPUID, cpuid, false),
2853 FEAT_OPR(TOTAL_MEM, total_mem, false),
2854 FEAT_OPR(EVENT_DESC, event_desc, false),
2855 FEAT_OPR(CMDLINE, cmdline, false),
2856 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
2857 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
2858 FEAT_OPN(BRANCH_STACK, branch_stack, false),
2859 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
Jiri Olsae8fedff2018-07-12 15:52:02 +02002860 FEAT_OPR(GROUP_DESC, group_desc, false),
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -07002861 FEAT_OPN(AUXTRACE, auxtrace, false),
2862 FEAT_OPN(STAT, stat, false),
2863 FEAT_OPN(CACHE, cache, true),
Jin Yao60115182017-12-08 21:13:41 +08002864 FEAT_OPR(SAMPLE_TIME, sample_time, false),
Jiri Olsae2091ce2018-03-07 16:50:08 +01002865 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
Jiri Olsa258031c2019-03-08 14:47:39 +01002866 FEAT_OPR(CLOCKID, clockid, false),
Song Liu606f9722019-03-11 22:30:43 -07002867 FEAT_OPN(DIR_FORMAT, dir_format, false),
Song Liua70a112312019-03-11 22:30:45 -07002868 FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
2869 FEAT_OPR(BPF_BTF, bpf_btf, false),
Alexey Budankov42e1fd82019-03-18 20:41:33 +03002870 FEAT_OPR(COMPRESSED, compressed, false),
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002871};
2872
/* Output parameters threaded through the section walk when printing. */
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};
2877
/*
 * perf_header__process_sections() callback: pretty-print one feature
 * section to hd->fp (hd arrives via the opaque 'data' pointer).
 * Always returns 0 so an unseekable, unknown, or unprintable feature
 * does not abort the walk over the remaining sections.
 */
static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;
	struct feat_fd ff;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
				"%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	ff = (struct feat_fd) {
		.fd = fd,
		.ph = ph,
	};

	/* "full only" features are just announced unless -I was given */
	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(&ff, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}
2910
/*
 * Print a human-readable dump of the perf.data header to fp: capture
 * time, layout offsets, every feature section, and -- for on-disk
 * (non-pipe) data -- the list of features missing from the file.
 * 'full' expands features that are otherwise only summarized.
 * Returns 0 on success, -1 if the data fd cannot be fstat()ed.
 */
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data__fd(session->data);
	struct stat st;
	time_t stctime;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	/* st_ctime may not be time_t on all libcs; copy before ctime() */
	stctime = st.st_ctime;
	fprintf(fp, "# captured on : %s", ctime(&stctime));

	fprintf(fp, "# header version : %u\n", header->version);
	fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
	fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
	fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	/* pipe data has no feature bitmap to report against */
	if (session->data->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		if (bit)	/* bit 0 is deliberately never reported */
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}
2950
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002951static int do_write_feat(struct feat_fd *ff, int type,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002952 struct perf_file_section **p,
Jiri Olsa63503db2019-07-21 13:23:52 +02002953 struct evlist *evlist)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002954{
2955 int err;
2956 int ret = 0;
2957
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002958 if (perf_header__has_feat(ff->ph, type)) {
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002959 if (!feat_ops[type].write)
2960 return -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002961
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -07002962 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
2963 return -1;
2964
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002965 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002966
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002967 err = feat_ops[type].write(ff, evlist);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002968 if (err < 0) {
Jiri Olsa0c2aff42016-10-10 09:38:02 +02002969 pr_debug("failed to write feature %s\n", feat_ops[type].name);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002970
2971 /* undo anything written */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002972 lseek(ff->fd, (*p)->offset, SEEK_SET);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002973
2974 return -1;
2975 }
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002976 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002977 (*p)++;
2978 }
2979 return ret;
2980}
2981
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002982static int perf_header__adds_write(struct perf_header *header,
Jiri Olsa63503db2019-07-21 13:23:52 +02002983 struct evlist *evlist, int fd)
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002984{
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002985 int nr_sections;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002986 struct feat_fd ff;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002987 struct perf_file_section *feat_sec, *p;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002988 int sec_size;
2989 u64 sec_start;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002990 int feat;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002991 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002992
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002993 ff = (struct feat_fd){
2994 .fd = fd,
2995 .ph = header,
2996 };
2997
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002998 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002999 if (!nr_sections)
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003000 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003001
Paul Gortmaker91b98802013-01-30 20:05:49 -05003002 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003003 if (feat_sec == NULL)
3004 return -ENOMEM;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003005
3006 sec_size = sizeof(*feat_sec) * nr_sections;
3007
Jiri Olsa8d541e92013-07-17 19:49:44 +02003008 sec_start = header->feat_offset;
Xiao Guangrongf887f302010-02-04 16:46:42 +08003009 lseek(fd, sec_start + sec_size, SEEK_SET);
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003010
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003011 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003012 if (do_write_feat(&ff, feat, &p, evlist))
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003013 perf_header__clear_feat(header, feat);
3014 }
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003015
Xiao Guangrongf887f302010-02-04 16:46:42 +08003016 lseek(fd, sec_start, SEEK_SET);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02003017 /*
3018 * may write more than needed due to dropped feature, but
Ingo Molnaradba1632018-12-03 11:22:00 +01003019 * this is okay, reader will skip the missing entries
Stephane Eranianfbe96f22011-09-30 15:40:40 +02003020 */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003021 err = do_write(&ff, feat_sec, sec_size);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003022 if (err < 0)
3023 pr_debug("failed to write feature section\n");
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003024 free(feat_sec);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003025 return err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003026}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003027
Tom Zanussi8dc58102010-04-01 23:59:15 -05003028int perf_header__write_pipe(int fd)
3029{
3030 struct perf_pipe_file_header f_header;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003031 struct feat_fd ff;
Tom Zanussi8dc58102010-04-01 23:59:15 -05003032 int err;
3033
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003034 ff = (struct feat_fd){ .fd = fd };
3035
Tom Zanussi8dc58102010-04-01 23:59:15 -05003036 f_header = (struct perf_pipe_file_header){
3037 .magic = PERF_MAGIC,
3038 .size = sizeof(f_header),
3039 };
3040
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003041 err = do_write(&ff, &f_header, sizeof(f_header));
Tom Zanussi8dc58102010-04-01 23:59:15 -05003042 if (err < 0) {
3043 pr_debug("failed to write perf pipe header\n");
3044 return err;
3045 }
3046
3047 return 0;
3048}
3049
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003050int perf_session__write_header(struct perf_session *session,
Jiri Olsa63503db2019-07-21 13:23:52 +02003051 struct evlist *evlist,
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003052 int fd, bool at_exit)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003053{
3054 struct perf_file_header f_header;
3055 struct perf_file_attr f_attr;
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003056 struct perf_header *header = &session->header;
Jiri Olsa32dcd022019-07-21 13:23:51 +02003057 struct evsel *evsel;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003058 struct feat_fd ff;
Jiri Olsa944d62b2013-07-17 19:49:43 +02003059 u64 attr_offset;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003060 int err;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003061
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003062 ff = (struct feat_fd){ .fd = fd};
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003063 lseek(fd, sizeof(f_header), SEEK_SET);
3064
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03003065 evlist__for_each_entry(session->evlist, evsel) {
Robert Richter6606f872012-08-16 21:10:19 +02003066 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003067 err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003068 if (err < 0) {
3069 pr_debug("failed to write perf header\n");
3070 return err;
3071 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003072 }
3073
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003074 attr_offset = lseek(ff.fd, 0, SEEK_CUR);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003075
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03003076 evlist__for_each_entry(evlist, evsel) {
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003077 f_attr = (struct perf_file_attr){
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003078 .attr = evsel->core.attr,
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003079 .ids = {
Robert Richter6606f872012-08-16 21:10:19 +02003080 .offset = evsel->id_offset,
3081 .size = evsel->ids * sizeof(u64),
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003082 }
3083 };
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003084 err = do_write(&ff, &f_attr, sizeof(f_attr));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003085 if (err < 0) {
3086 pr_debug("failed to write perf header attribute\n");
3087 return err;
3088 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003089 }
3090
Adrian Hunterd645c442013-12-11 14:36:28 +02003091 if (!header->data_offset)
3092 header->data_offset = lseek(fd, 0, SEEK_CUR);
Jiri Olsa8d541e92013-07-17 19:49:44 +02003093 header->feat_offset = header->data_offset + header->data_size;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003094
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003095 if (at_exit) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003096 err = perf_header__adds_write(header, evlist, fd);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003097 if (err < 0)
3098 return err;
3099 }
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003100
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003101 f_header = (struct perf_file_header){
3102 .magic = PERF_MAGIC,
3103 .size = sizeof(f_header),
3104 .attr_size = sizeof(f_attr),
3105 .attrs = {
Jiri Olsa944d62b2013-07-17 19:49:43 +02003106 .offset = attr_offset,
Jiri Olsa6484d2f2019-07-21 13:24:28 +02003107 .size = evlist->core.nr_entries * sizeof(f_attr),
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003108 },
3109 .data = {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003110 .offset = header->data_offset,
3111 .size = header->data_size,
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003112 },
Jiri Olsa44b3c572013-07-11 17:28:31 +02003113 /* event_types is ignored, store zeros */
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003114 };
3115
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003116 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003117
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003118 lseek(fd, 0, SEEK_SET);
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003119 err = do_write(&ff, &f_header, sizeof(f_header));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003120 if (err < 0) {
3121 pr_debug("failed to write perf header\n");
3122 return err;
3123 }
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003124 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003125
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02003126 return 0;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003127}
3128
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003129static int perf_header__getbuffer64(struct perf_header *header,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003130 int fd, void *buf, size_t size)
3131{
Arnaldo Carvalho de Melo1e7972c2011-01-03 16:50:55 -02003132 if (readn(fd, buf, size) <= 0)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003133 return -1;
3134
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003135 if (header->needs_swap)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003136 mem_bswap_64(buf, size);
3137
3138 return 0;
3139}
3140
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003141int perf_header__process_sections(struct perf_header *header, int fd,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02003142 void *data,
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003143 int (*process)(struct perf_file_section *section,
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003144 struct perf_header *ph,
3145 int feat, int fd, void *data))
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003146{
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003147 struct perf_file_section *feat_sec, *sec;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003148 int nr_sections;
3149 int sec_size;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003150 int feat;
3151 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003152
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003153 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003154 if (!nr_sections)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003155 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003156
Paul Gortmaker91b98802013-01-30 20:05:49 -05003157 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003158 if (!feat_sec)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003159 return -1;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003160
3161 sec_size = sizeof(*feat_sec) * nr_sections;
3162
Jiri Olsa8d541e92013-07-17 19:49:44 +02003163 lseek(fd, header->feat_offset, SEEK_SET);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003164
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003165 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
3166 if (err < 0)
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003167 goto out_free;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003168
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003169 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
3170 err = process(sec++, header, feat, fd, data);
3171 if (err < 0)
3172 goto out_free;
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +01003173 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003174 err = 0;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003175out_free:
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01003176 free(feat_sec);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003177 return err;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003178}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003179
Stephane Eranian114382a2012-02-09 23:21:08 +01003180static const int attr_file_abi_sizes[] = {
3181 [0] = PERF_ATTR_SIZE_VER0,
3182 [1] = PERF_ATTR_SIZE_VER1,
Jiri Olsa239cc472012-08-07 15:20:42 +02003183 [2] = PERF_ATTR_SIZE_VER2,
Jiri Olsa0f6a3012012-08-07 15:20:45 +02003184 [3] = PERF_ATTR_SIZE_VER3,
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02003185 [4] = PERF_ATTR_SIZE_VER4,
Stephane Eranian114382a2012-02-09 23:21:08 +01003186 0,
3187};
3188
3189/*
3190 * In the legacy file format, the magic number is not used to encode endianness.
3191 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
3192 * on ABI revisions, we need to try all combinations for all endianness to
3193 * detect the endianness.
3194 */
3195static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
3196{
3197 uint64_t ref_size, attr_size;
3198 int i;
3199
3200 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
3201 ref_size = attr_file_abi_sizes[i]
3202 + sizeof(struct perf_file_section);
3203 if (hdr_sz != ref_size) {
3204 attr_size = bswap_64(hdr_sz);
3205 if (attr_size != ref_size)
3206 continue;
3207
3208 ph->needs_swap = true;
3209 }
3210 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
3211 i,
3212 ph->needs_swap);
3213 return 0;
3214 }
3215 /* could not determine endianness */
3216 return -1;
3217}
3218
/* Size of the pipe-mode file header for ABI revision 0. */
#define PERF_PIPE_HDR_VER0	16

/*
 * Known pipe-mode header sizes, one per ABI revision; zero-terminated.
 * Scanned by try_all_pipe_abis() to detect the ABI of a legacy pipe header.
 */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
3225
3226/*
3227 * In the legacy pipe format, there is an implicit assumption that endiannesss
3228 * between host recording the samples, and host parsing the samples is the
3229 * same. This is not always the case given that the pipe output may always be
3230 * redirected into a file and analyzed on a different machine with possibly a
3231 * different endianness and perf_event ABI revsions in the perf tool itself.
3232 */
3233static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
3234{
3235 u64 attr_size;
3236 int i;
3237
3238 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
3239 if (hdr_sz != attr_pipe_abi_sizes[i]) {
3240 attr_size = bswap_64(hdr_sz);
3241 if (attr_size != hdr_sz)
3242 continue;
3243
3244 ph->needs_swap = true;
3245 }
3246 pr_debug("Pipe ABI%d perf.data file detected\n", i);
3247 return 0;
3248 }
3249 return -1;
3250}
3251
Feng Tange84ba4e2012-10-30 11:56:07 +08003252bool is_perf_magic(u64 magic)
3253{
3254 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3255 || magic == __perf_magic2
3256 || magic == __perf_magic2_sw)
3257 return true;
3258
3259 return false;
3260}
3261
Stephane Eranian114382a2012-02-09 23:21:08 +01003262static int check_magic_endian(u64 magic, uint64_t hdr_sz,
3263 bool is_pipe, struct perf_header *ph)
Stephane Eranian73323f52012-02-02 13:54:44 +01003264{
3265 int ret;
3266
3267 /* check for legacy format */
Stephane Eranian114382a2012-02-09 23:21:08 +01003268 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
Stephane Eranian73323f52012-02-02 13:54:44 +01003269 if (ret == 0) {
Jiri Olsa2a08c3e2013-07-17 19:49:47 +02003270 ph->version = PERF_HEADER_VERSION_1;
Stephane Eranian73323f52012-02-02 13:54:44 +01003271 pr_debug("legacy perf.data format\n");
Stephane Eranian114382a2012-02-09 23:21:08 +01003272 if (is_pipe)
3273 return try_all_pipe_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01003274
Stephane Eranian114382a2012-02-09 23:21:08 +01003275 return try_all_file_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01003276 }
Stephane Eranian114382a2012-02-09 23:21:08 +01003277 /*
3278 * the new magic number serves two purposes:
3279 * - unique number to identify actual perf.data files
3280 * - encode endianness of file
3281 */
Namhyung Kimf7913972015-01-29 17:06:45 +09003282 ph->version = PERF_HEADER_VERSION_2;
Stephane Eranian73323f52012-02-02 13:54:44 +01003283
Stephane Eranian114382a2012-02-09 23:21:08 +01003284 /* check magic number with one endianness */
3285 if (magic == __perf_magic2)
Stephane Eranian73323f52012-02-02 13:54:44 +01003286 return 0;
3287
Stephane Eranian114382a2012-02-09 23:21:08 +01003288 /* check magic number with opposite endianness */
3289 if (magic != __perf_magic2_sw)
Stephane Eranian73323f52012-02-02 13:54:44 +01003290 return -1;
3291
3292 ph->needs_swap = true;
3293
3294 return 0;
3295}
3296
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003297int perf_file_header__read(struct perf_file_header *header,
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003298 struct perf_header *ph, int fd)
3299{
Jiri Olsa727ebd52013-11-28 11:30:14 +01003300 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01003301
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003302 lseek(fd, 0, SEEK_SET);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003303
Stephane Eranian73323f52012-02-02 13:54:44 +01003304 ret = readn(fd, header, sizeof(*header));
3305 if (ret <= 0)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003306 return -1;
3307
Stephane Eranian114382a2012-02-09 23:21:08 +01003308 if (check_magic_endian(header->magic,
3309 header->attr_size, false, ph) < 0) {
3310 pr_debug("magic/endian check failed\n");
Stephane Eranian73323f52012-02-02 13:54:44 +01003311 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01003312 }
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003313
Stephane Eranian73323f52012-02-02 13:54:44 +01003314 if (ph->needs_swap) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003315 mem_bswap_64(header, offsetof(struct perf_file_header,
Stephane Eranian73323f52012-02-02 13:54:44 +01003316 adds_features));
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003317 }
3318
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003319 if (header->size != sizeof(*header)) {
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003320 /* Support the previous format */
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003321 if (header->size == offsetof(typeof(*header), adds_features))
3322 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003323 else
3324 return -1;
David Ahernd327fa42011-10-18 17:34:01 -06003325 } else if (ph->needs_swap) {
David Ahernd327fa42011-10-18 17:34:01 -06003326 /*
3327 * feature bitmap is declared as an array of unsigned longs --
3328 * not good since its size can differ between the host that
3329 * generated the data file and the host analyzing the file.
3330 *
3331 * We need to handle endianness, but we don't know the size of
3332 * the unsigned long where the file was generated. Take a best
3333 * guess at determining it: try 64-bit swap first (ie., file
3334 * created on a 64-bit host), and check if the hostname feature
3335 * bit is set (this feature bit is forced on as of fbe96f2).
3336 * If the bit is not, undo the 64-bit swap and try a 32-bit
3337 * swap. If the hostname bit is still not set (e.g., older data
3338 * file), punt and fallback to the original behavior --
3339 * clearing all feature bits and setting buildid.
3340 */
David Ahern80c01202012-06-08 11:47:51 -03003341 mem_bswap_64(&header->adds_features,
3342 BITS_TO_U64(HEADER_FEAT_BITS));
David Ahernd327fa42011-10-18 17:34:01 -06003343
3344 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
David Ahern80c01202012-06-08 11:47:51 -03003345 /* unswap as u64 */
3346 mem_bswap_64(&header->adds_features,
3347 BITS_TO_U64(HEADER_FEAT_BITS));
3348
3349 /* unswap as u32 */
3350 mem_bswap_32(&header->adds_features,
3351 BITS_TO_U32(HEADER_FEAT_BITS));
David Ahernd327fa42011-10-18 17:34:01 -06003352 }
3353
3354 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
3355 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
3356 set_bit(HEADER_BUILD_ID, header->adds_features);
3357 }
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003358 }
3359
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003360 memcpy(&ph->adds_features, &header->adds_features,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003361 sizeof(ph->adds_features));
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003362
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003363 ph->data_offset = header->data.offset;
3364 ph->data_size = header->data.size;
Jiri Olsa8d541e92013-07-17 19:49:44 +02003365 ph->feat_offset = header->data.offset + header->data.size;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003366 return 0;
3367}
3368
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003369static int perf_file_section__process(struct perf_file_section *section,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003370 struct perf_header *ph,
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03003371 int feat, int fd, void *data)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003372{
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003373 struct feat_fd fdd = {
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07003374 .fd = fd,
3375 .ph = ph,
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003376 .size = section->size,
3377 .offset = section->offset,
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07003378 };
3379
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003380 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
Arnaldo Carvalho de Melo9486aa32011-01-22 20:37:02 -02003381 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003382 "%d, continuing...\n", section->offset, feat);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003383 return 0;
3384 }
3385
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003386 if (feat >= HEADER_LAST_FEATURE) {
3387 pr_debug("unknown feature %d, continuing...\n", feat);
3388 return 0;
3389 }
3390
Robert Richterf1c67db2012-02-10 15:41:56 +01003391 if (!feat_ops[feat].process)
3392 return 0;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003393
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003394 return feat_ops[feat].process(&fdd, data);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003395}
3396
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003397static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
Tom Zanussi454c4072010-05-01 01:41:20 -05003398 struct perf_header *ph, int fd,
3399 bool repipe)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003400{
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003401 struct feat_fd ff = {
3402 .fd = STDOUT_FILENO,
3403 .ph = ph,
3404 };
Jiri Olsa727ebd52013-11-28 11:30:14 +01003405 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01003406
3407 ret = readn(fd, header, sizeof(*header));
3408 if (ret <= 0)
3409 return -1;
3410
Stephane Eranian114382a2012-02-09 23:21:08 +01003411 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
3412 pr_debug("endian/magic failed\n");
Tom Zanussi8dc58102010-04-01 23:59:15 -05003413 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01003414 }
3415
3416 if (ph->needs_swap)
3417 header->size = bswap_64(header->size);
Tom Zanussi8dc58102010-04-01 23:59:15 -05003418
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003419 if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
Tom Zanussi454c4072010-05-01 01:41:20 -05003420 return -1;
3421
Tom Zanussi8dc58102010-04-01 23:59:15 -05003422 return 0;
3423}
3424
Jiri Olsad4339562013-07-17 19:49:41 +02003425static int perf_header__read_pipe(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05003426{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003427 struct perf_header *header = &session->header;
Tom Zanussi8dc58102010-04-01 23:59:15 -05003428 struct perf_pipe_file_header f_header;
3429
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003430 if (perf_file_header__read_pipe(&f_header, header,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003431 perf_data__fd(session->data),
Tom Zanussi454c4072010-05-01 01:41:20 -05003432 session->repipe) < 0) {
Tom Zanussi8dc58102010-04-01 23:59:15 -05003433 pr_debug("incompatible file format\n");
3434 return -EINVAL;
3435 }
3436
Tom Zanussi8dc58102010-04-01 23:59:15 -05003437 return 0;
3438}
3439
Stephane Eranian69996df2012-02-09 23:21:06 +01003440static int read_attr(int fd, struct perf_header *ph,
3441 struct perf_file_attr *f_attr)
3442{
3443 struct perf_event_attr *attr = &f_attr->attr;
3444 size_t sz, left;
3445 size_t our_sz = sizeof(f_attr->attr);
Jiri Olsa727ebd52013-11-28 11:30:14 +01003446 ssize_t ret;
Stephane Eranian69996df2012-02-09 23:21:06 +01003447
3448 memset(f_attr, 0, sizeof(*f_attr));
3449
3450 /* read minimal guaranteed structure */
3451 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
3452 if (ret <= 0) {
3453 pr_debug("cannot read %d bytes of header attr\n",
3454 PERF_ATTR_SIZE_VER0);
3455 return -1;
3456 }
3457
3458 /* on file perf_event_attr size */
3459 sz = attr->size;
Stephane Eranian114382a2012-02-09 23:21:08 +01003460
Stephane Eranian69996df2012-02-09 23:21:06 +01003461 if (ph->needs_swap)
3462 sz = bswap_32(sz);
3463
3464 if (sz == 0) {
3465 /* assume ABI0 */
3466 sz = PERF_ATTR_SIZE_VER0;
3467 } else if (sz > our_sz) {
3468 pr_debug("file uses a more recent and unsupported ABI"
3469 " (%zu bytes extra)\n", sz - our_sz);
3470 return -1;
3471 }
3472 /* what we have not yet read and that we know about */
3473 left = sz - PERF_ATTR_SIZE_VER0;
3474 if (left) {
3475 void *ptr = attr;
3476 ptr += PERF_ATTR_SIZE_VER0;
3477
3478 ret = readn(fd, ptr, left);
3479 }
3480 /* read perf_file_section, ids are read in caller */
3481 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
3482
3483 return ret <= 0 ? -1 : 0;
3484}
3485
Jiri Olsa32dcd022019-07-21 13:23:51 +02003486static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel,
Tzvetomir Stoyanov (VMware)096177a2018-08-08 14:02:46 -04003487 struct tep_handle *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003488{
Tzvetomir Stoyanov97fbf3f2018-11-30 10:44:07 -05003489 struct tep_event *event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003490 char bf[128];
3491
Namhyung Kim831394b2012-09-06 11:10:46 +09003492 /* already prepared */
3493 if (evsel->tp_format)
3494 return 0;
3495
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09003496 if (pevent == NULL) {
3497 pr_debug("broken or missing trace data\n");
3498 return -1;
3499 }
3500
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003501 event = tep_find_event(pevent, evsel->core.attr.config);
Namhyung Kima7619ae2013-04-18 21:24:16 +09003502 if (event == NULL) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003503 pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003504 return -1;
Namhyung Kima7619ae2013-04-18 21:24:16 +09003505 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003506
Namhyung Kim831394b2012-09-06 11:10:46 +09003507 if (!evsel->name) {
3508 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
3509 evsel->name = strdup(bf);
3510 if (evsel->name == NULL)
3511 return -1;
3512 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003513
Arnaldo Carvalho de Melofcf65bf2012-08-07 09:58:03 -03003514 evsel->tp_format = event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003515 return 0;
3516}
3517
Jiri Olsa63503db2019-07-21 13:23:52 +02003518static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist,
Tzvetomir Stoyanov (VMware)096177a2018-08-08 14:02:46 -04003519 struct tep_handle *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003520{
Jiri Olsa32dcd022019-07-21 13:23:51 +02003521 struct evsel *pos;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003522
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03003523 evlist__for_each_entry(evlist, pos) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003524 if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
Namhyung Kim831394b2012-09-06 11:10:46 +09003525 perf_evsel__prepare_tracepoint_event(pos, pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003526 return -1;
3527 }
3528
3529 return 0;
3530}
3531
Jiri Olsad4339562013-07-17 19:49:41 +02003532int perf_session__read_header(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05003533{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003534 struct perf_data *data = session->data;
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003535 struct perf_header *header = &session->header;
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003536 struct perf_file_header f_header;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003537 struct perf_file_attr f_attr;
3538 u64 f_id;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003539 int nr_attrs, nr_ids, i, j;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003540 int fd = perf_data__fd(data);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003541
Jiri Olsa0f98b112019-07-21 13:23:55 +02003542 session->evlist = evlist__new();
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003543 if (session->evlist == NULL)
3544 return -ENOMEM;
3545
Kan Liang2c071442015-08-28 05:48:05 -04003546 session->evlist->env = &header->env;
Arnaldo Carvalho de Melo4cde9982015-09-09 12:25:00 -03003547 session->machines.host.env = &header->env;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003548 if (perf_data__is_pipe(data))
Jiri Olsad4339562013-07-17 19:49:41 +02003549 return perf_header__read_pipe(session);
Tom Zanussi8dc58102010-04-01 23:59:15 -05003550
Stephane Eranian69996df2012-02-09 23:21:06 +01003551 if (perf_file_header__read(&f_header, header, fd) < 0)
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02003552 return -EINVAL;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003553
Namhyung Kimb314e5c2013-09-30 17:19:48 +09003554 /*
3555 * Sanity check that perf.data was written cleanly; data size is
3556 * initialized to 0 and updated only if the on_exit function is run.
3557 * If data size is still 0 then the file contains only partial
3558 * information. Just warn user and process it as much as it can.
3559 */
3560 if (f_header.data.size == 0) {
3561 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
3562 "Was the 'perf record' command properly terminated?\n",
Jiri Olsaeae8ad82017-01-23 22:25:41 +01003563 data->file.path);
Namhyung Kimb314e5c2013-09-30 17:19:48 +09003564 }
3565
Vince Weaver76222362019-07-23 11:06:01 -04003566 if (f_header.attr_size == 0) {
3567 pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
3568 "Was the 'perf record' command properly terminated?\n",
3569 data->file.path);
3570 return -EINVAL;
3571 }
3572
Stephane Eranian69996df2012-02-09 23:21:06 +01003573 nr_attrs = f_header.attrs.size / f_header.attr_size;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003574 lseek(fd, f_header.attrs.offset, SEEK_SET);
3575
3576 for (i = 0; i < nr_attrs; i++) {
Jiri Olsa32dcd022019-07-21 13:23:51 +02003577 struct evsel *evsel;
Peter Zijlstra1c222bc2009-08-06 20:57:41 +02003578 off_t tmp;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003579
Stephane Eranian69996df2012-02-09 23:21:06 +01003580 if (read_attr(fd, header, &f_attr) < 0)
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003581 goto out_errno;
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003582
David Ahern1060ab82015-04-09 16:15:46 -04003583 if (header->needs_swap) {
3584 f_attr.ids.size = bswap_64(f_attr.ids.size);
3585 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
David Aherneda39132011-07-15 12:34:09 -06003586 perf_event__attr_swap(&f_attr.attr);
David Ahern1060ab82015-04-09 16:15:46 -04003587 }
David Aherneda39132011-07-15 12:34:09 -06003588
Peter Zijlstra1c222bc2009-08-06 20:57:41 +02003589 tmp = lseek(fd, 0, SEEK_CUR);
Jiri Olsa365c3ae2019-07-21 13:23:58 +02003590 evsel = evsel__new(&f_attr.attr);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003591
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003592 if (evsel == NULL)
3593 goto out_delete_evlist;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03003594
3595 evsel->needs_swap = header->needs_swap;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003596 /*
3597 * Do it before so that if perf_evsel__alloc_id fails, this
Jiri Olsac12995a2019-07-21 13:23:56 +02003598 * entry gets purged too at evlist__delete().
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003599 */
Jiri Olsaa1cf3a72019-07-21 13:23:59 +02003600 evlist__add(session->evlist, evsel);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003601
3602 nr_ids = f_attr.ids.size / sizeof(u64);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003603 /*
3604 * We don't have the cpu and thread maps on the header, so
3605 * for allocating the perf_sample_id table we fake 1 cpu and
3606 * hattr->ids threads.
3607 */
3608 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
3609 goto out_delete_evlist;
3610
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003611 lseek(fd, f_attr.ids.offset, SEEK_SET);
3612
3613 for (j = 0; j < nr_ids; j++) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003614 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003615 goto out_errno;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003616
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003617 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02003618 }
Arnaldo Carvalho de Melo11deb1f2009-11-17 01:18:09 -02003619
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003620 lseek(fd, tmp, SEEK_SET);
3621 }
3622
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01003623 perf_header__process_sections(header, fd, &session->tevent,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02003624 perf_file_section__process);
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +01003625
Namhyung Kim831394b2012-09-06 11:10:46 +09003626 if (perf_evlist__prepare_tracepoint_events(session->evlist,
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01003627 session->tevent.pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003628 goto out_delete_evlist;
3629
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02003630 return 0;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003631out_errno:
3632 return -errno;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003633
3634out_delete_evlist:
Jiri Olsac12995a2019-07-21 13:23:56 +02003635 evlist__delete(session->evlist);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003636 session->evlist = NULL;
3637 return -ENOMEM;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003638}
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02003639
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003640int perf_event__synthesize_attr(struct perf_tool *tool,
Robert Richterf4d83432012-08-16 21:10:17 +02003641 struct perf_event_attr *attr, u32 ids, u64 *id,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02003642 perf_event__handler_t process)
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02003643{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003644 union perf_event *ev;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003645 size_t size;
3646 int err;
3647
3648 size = sizeof(struct perf_event_attr);
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003649 size = PERF_ALIGN(size, sizeof(u64));
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003650 size += sizeof(struct perf_event_header);
3651 size += ids * sizeof(u64);
3652
Numfor Mbiziwo-Tiapo20f97812019-07-24 16:44:58 -07003653 ev = zalloc(size);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003654
Chris Samuelce47dc52010-11-13 13:35:06 +11003655 if (ev == NULL)
3656 return -ENOMEM;
3657
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003658 ev->attr.attr = *attr;
3659 memcpy(ev->attr.id, id, ids * sizeof(u64));
3660
3661 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
Robert Richterf4d83432012-08-16 21:10:17 +02003662 ev->attr.header.size = (u16)size;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003663
Robert Richterf4d83432012-08-16 21:10:17 +02003664 if (ev->attr.header.size == size)
3665 err = process(tool, ev, NULL, NULL);
3666 else
3667 err = -E2BIG;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003668
3669 free(ev);
3670
3671 return err;
3672}
3673
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003674int perf_event__synthesize_features(struct perf_tool *tool,
3675 struct perf_session *session,
Jiri Olsa63503db2019-07-21 13:23:52 +02003676 struct evlist *evlist,
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003677 perf_event__handler_t process)
3678{
3679 struct perf_header *header = &session->header;
3680 struct feat_fd ff;
Jiri Olsa72932372019-08-28 15:57:16 +02003681 struct perf_record_header_feature *fe;
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003682 size_t sz, sz_hdr;
3683 int feat, ret;
3684
3685 sz_hdr = sizeof(fe->header);
3686 sz = sizeof(union perf_event);
3687 /* get a nice alignment */
3688 sz = PERF_ALIGN(sz, page_size);
3689
3690 memset(&ff, 0, sizeof(ff));
3691
3692 ff.buf = malloc(sz);
3693 if (!ff.buf)
3694 return -ENOMEM;
3695
3696 ff.size = sz - sz_hdr;
Song Liuc952b352019-06-19 18:04:53 -07003697 ff.ph = &session->header;
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003698
3699 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3700 if (!feat_ops[feat].synthesize) {
3701 pr_debug("No record header feature for header :%d\n", feat);
3702 continue;
3703 }
3704
3705 ff.offset = sizeof(*fe);
3706
3707 ret = feat_ops[feat].write(&ff, evlist);
3708 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
3709 pr_debug("Error writing feature\n");
3710 continue;
3711 }
3712 /* ff.buf may have changed due to realloc in do_write() */
3713 fe = ff.buf;
3714 memset(fe, 0, sizeof(*fe));
3715
3716 fe->feat_id = feat;
3717 fe->header.type = PERF_RECORD_HEADER_FEATURE;
3718 fe->header.size = ff.offset;
3719
3720 ret = process(tool, ff.buf, NULL, NULL);
3721 if (ret) {
3722 free(ff.buf);
3723 return ret;
3724 }
3725 }
Jiri Olsa57b5de42018-03-14 10:22:05 +01003726
3727 /* Send HEADER_LAST_FEATURE mark. */
3728 fe = ff.buf;
3729 fe->feat_id = HEADER_LAST_FEATURE;
3730 fe->header.type = PERF_RECORD_HEADER_FEATURE;
3731 fe->header.size = sizeof(*fe);
3732
3733 ret = process(tool, ff.buf, NULL, NULL);
3734
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003735 free(ff.buf);
Jiri Olsa57b5de42018-03-14 10:22:05 +01003736 return ret;
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003737}
3738
Jiri Olsa89f16882018-09-13 14:54:03 +02003739int perf_event__process_feature(struct perf_session *session,
3740 union perf_event *event)
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003741{
Jiri Olsa89f16882018-09-13 14:54:03 +02003742 struct perf_tool *tool = session->tool;
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003743 struct feat_fd ff = { .fd = 0 };
Jiri Olsa72932372019-08-28 15:57:16 +02003744 struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003745 int type = fe->header.type;
3746 u64 feat = fe->feat_id;
3747
3748 if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
3749 pr_warning("invalid record type %d in pipe-mode\n", type);
3750 return 0;
3751 }
Ravi Bangoria92ead7e2018-06-25 18:12:20 +05303752 if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003753 pr_warning("invalid record type %d in pipe-mode\n", type);
3754 return -1;
3755 }
3756
3757 if (!feat_ops[feat].process)
3758 return 0;
3759
3760 ff.buf = (void *)fe->data;
Jiri Olsa79b2fe52019-07-15 16:04:26 +02003761 ff.size = event->header.size - sizeof(*fe);
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003762 ff.ph = &session->header;
3763
3764 if (feat_ops[feat].process(&ff, NULL))
3765 return -1;
3766
3767 if (!feat_ops[feat].print || !tool->show_feat_hdr)
3768 return 0;
3769
3770 if (!feat_ops[feat].full_only ||
3771 tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
3772 feat_ops[feat].print(&ff, stdout);
3773 } else {
3774 fprintf(stdout, "# %s info available, use -I to display\n",
3775 feat_ops[feat].name);
3776 }
3777
3778 return 0;
3779}
3780
Jiri Olsa72932372019-08-28 15:57:16 +02003781static struct perf_record_event_update *
Jiri Olsaa6e52812015-10-25 15:51:37 +01003782event_update_event__new(size_t size, u64 type, u64 id)
3783{
Jiri Olsa72932372019-08-28 15:57:16 +02003784 struct perf_record_event_update *ev;
Jiri Olsaa6e52812015-10-25 15:51:37 +01003785
3786 size += sizeof(*ev);
3787 size = PERF_ALIGN(size, sizeof(u64));
3788
3789 ev = zalloc(size);
3790 if (ev) {
3791 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3792 ev->header.size = (u16)size;
3793 ev->type = type;
3794 ev->id = id;
3795 }
3796 return ev;
3797}
3798
3799int
3800perf_event__synthesize_event_update_unit(struct perf_tool *tool,
Jiri Olsa32dcd022019-07-21 13:23:51 +02003801 struct evsel *evsel,
Jiri Olsaa6e52812015-10-25 15:51:37 +01003802 perf_event__handler_t process)
3803{
Jiri Olsa72932372019-08-28 15:57:16 +02003804 struct perf_record_event_update *ev;
Jiri Olsaa6e52812015-10-25 15:51:37 +01003805 size_t size = strlen(evsel->unit);
3806 int err;
3807
3808 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3809 if (ev == NULL)
3810 return -ENOMEM;
3811
Arnaldo Carvalho de Melo75725882018-12-06 11:02:57 -03003812 strlcpy(ev->data, evsel->unit, size + 1);
Jiri Olsaa6e52812015-10-25 15:51:37 +01003813 err = process(tool, (union perf_event *)ev, NULL, NULL);
3814 free(ev);
3815 return err;
3816}
3817
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003818int
3819perf_event__synthesize_event_update_scale(struct perf_tool *tool,
Jiri Olsa32dcd022019-07-21 13:23:51 +02003820 struct evsel *evsel,
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003821 perf_event__handler_t process)
3822{
Jiri Olsa72932372019-08-28 15:57:16 +02003823 struct perf_record_event_update *ev;
3824 struct perf_record_event_update_scale *ev_data;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003825 int err;
3826
3827 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3828 if (ev == NULL)
3829 return -ENOMEM;
3830
Jiri Olsa72932372019-08-28 15:57:16 +02003831 ev_data = (struct perf_record_event_update_scale *)ev->data;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003832 ev_data->scale = evsel->scale;
3833 err = process(tool, (union perf_event*) ev, NULL, NULL);
3834 free(ev);
3835 return err;
3836}
3837
Jiri Olsa802c9042015-10-25 15:51:39 +01003838int
3839perf_event__synthesize_event_update_name(struct perf_tool *tool,
Jiri Olsa32dcd022019-07-21 13:23:51 +02003840 struct evsel *evsel,
Jiri Olsa802c9042015-10-25 15:51:39 +01003841 perf_event__handler_t process)
3842{
Jiri Olsa72932372019-08-28 15:57:16 +02003843 struct perf_record_event_update *ev;
Jiri Olsa802c9042015-10-25 15:51:39 +01003844 size_t len = strlen(evsel->name);
3845 int err;
3846
3847 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3848 if (ev == NULL)
3849 return -ENOMEM;
3850
Arnaldo Carvalho de Melo5192bde2018-12-06 11:09:46 -03003851 strlcpy(ev->data, evsel->name, len + 1);
Jiri Olsa802c9042015-10-25 15:51:39 +01003852 err = process(tool, (union perf_event*) ev, NULL, NULL);
3853 free(ev);
3854 return err;
3855}
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003856
/*
 * Synthesize a PERF_RECORD_EVENT_UPDATE event carrying the evsel's own
 * cpu map (evsel->core.own_cpus) and hand it to @process.
 *
 * Returns 0 when the evsel has no own cpu map (nothing to synthesize),
 * -ENOMEM on allocation failure, otherwise whatever @process returns.
 */
int
perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
					 struct evsel *evsel,
					 perf_event__handler_t process)
{
	size_t size = sizeof(struct perf_record_event_update);
	struct perf_record_event_update *ev;
	int max, err;
	u16 type;

	if (!evsel->core.own_cpus)
		return 0;

	/*
	 * cpu_map_data__alloc() sizes the allocation for the serialized cpu
	 * map appended after the fixed header (size is both in and out: it
	 * enters holding the header size and returns the total event size),
	 * and reports the chosen encoding and max cpu via type/max.
	 */
	ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
	if (!ev)
		return -ENOMEM;

	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)size;
	ev->type = PERF_EVENT_UPDATE__CPUS;
	ev->id = evsel->id[0];

	/* Serialize the cpu map into the event's trailing data area. */
	cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
				 evsel->core.own_cpus,
				 type, max);

	err = process(tool, (union perf_event*) ev, NULL, NULL);
	free(ev);
	return err;
}
3887
Jiri Olsac853f932015-10-25 15:51:41 +01003888size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3889{
Jiri Olsa72932372019-08-28 15:57:16 +02003890 struct perf_record_event_update *ev = &event->event_update;
3891 struct perf_record_event_update_scale *ev_scale;
3892 struct perf_record_event_update_cpus *ev_cpus;
Jiri Olsaf8548392019-07-21 13:23:49 +02003893 struct perf_cpu_map *map;
Jiri Olsac853f932015-10-25 15:51:41 +01003894 size_t ret;
3895
Jiri Olsa5ded0682019-08-28 15:56:57 +02003896 ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id);
Jiri Olsac853f932015-10-25 15:51:41 +01003897
3898 switch (ev->type) {
3899 case PERF_EVENT_UPDATE__SCALE:
Jiri Olsa72932372019-08-28 15:57:16 +02003900 ev_scale = (struct perf_record_event_update_scale *)ev->data;
Jiri Olsac853f932015-10-25 15:51:41 +01003901 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3902 break;
3903 case PERF_EVENT_UPDATE__UNIT:
3904 ret += fprintf(fp, "... unit: %s\n", ev->data);
3905 break;
3906 case PERF_EVENT_UPDATE__NAME:
3907 ret += fprintf(fp, "... name: %s\n", ev->data);
3908 break;
3909 case PERF_EVENT_UPDATE__CPUS:
Jiri Olsa72932372019-08-28 15:57:16 +02003910 ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
Jiri Olsac853f932015-10-25 15:51:41 +01003911 ret += fprintf(fp, "... ");
3912
3913 map = cpu_map__new_data(&ev_cpus->cpus);
3914 if (map)
3915 ret += cpu_map__fprintf(map, fp);
3916 else
3917 ret += fprintf(fp, "failed to get cpus\n");
3918 break;
3919 default:
3920 ret += fprintf(fp, "... unknown type\n");
3921 break;
3922 }
3923
3924 return ret;
3925}
Jiri Olsa86ebb092015-10-25 15:51:40 +01003926
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003927int perf_event__synthesize_attrs(struct perf_tool *tool,
Jiri Olsa63503db2019-07-21 13:23:52 +02003928 struct evlist *evlist,
Jiri Olsa318ec182018-08-30 08:32:15 +02003929 perf_event__handler_t process)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003930{
Jiri Olsa32dcd022019-07-21 13:23:51 +02003931 struct evsel *evsel;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003932 int err = 0;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003933
Jiri Olsa318ec182018-08-30 08:32:15 +02003934 evlist__for_each_entry(evlist, evsel) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003935 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->ids,
Robert Richter6606f872012-08-16 21:10:19 +02003936 evsel->id, process);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003937 if (err) {
3938 pr_debug("failed to create perf header attribute\n");
3939 return err;
3940 }
3941 }
3942
3943 return err;
3944}
3945
Jiri Olsa32dcd022019-07-21 13:23:51 +02003946static bool has_unit(struct evsel *counter)
Andi Kleenbfd8f722017-11-17 13:42:58 -08003947{
3948 return counter->unit && *counter->unit;
3949}
3950
Jiri Olsa32dcd022019-07-21 13:23:51 +02003951static bool has_scale(struct evsel *counter)
Andi Kleenbfd8f722017-11-17 13:42:58 -08003952{
3953 return counter->scale != 1;
3954}
3955
/*
 * Synthesize the per-evsel attributes that are not carried within the
 * attr event itself - unit, scale, own cpus and (for pipe mode) name -
 * as PERF_RECORD_EVENT_UPDATE events, one per attribute per supported
 * counter in @evsel_list.
 *
 * Returns 0 on success or the first negative error from any of the
 * perf_event__synthesize_event_update_*() helpers.
 */
int perf_event__synthesize_extra_attr(struct perf_tool *tool,
				      struct evlist *evsel_list,
				      perf_event__handler_t process,
				      bool is_pipe)
{
	struct evsel *counter;
	int err;

	/*
	 * Synthesize other events stuff not carried within
	 * attr event - unit, scale, name
	 */
	evlist__for_each_entry(evsel_list, counter) {
		/* Counters that failed to open have nothing worth emitting. */
		if (!counter->supported)
			continue;

		/*
		 * Synthesize unit and scale only if it's defined.
		 */
		if (has_unit(counter)) {
			err = perf_event__synthesize_event_update_unit(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel unit.\n");
				return err;
			}
		}

		if (has_scale(counter)) {
			err = perf_event__synthesize_event_update_scale(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel counter.\n");
				return err;
			}
		}

		if (counter->core.own_cpus) {
			err = perf_event__synthesize_event_update_cpus(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel cpus.\n");
				return err;
			}
		}

		/*
		 * Name is needed only for pipe output,
		 * perf.data carries event names.
		 */
		if (is_pipe) {
			err = perf_event__synthesize_event_update_name(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel name.\n");
				return err;
			}
		}
	}
	return 0;
}
4013
/*
 * Handle a PERF_RECORD_HEADER_ATTR event: create an evsel from the
 * attr payload, add it to *pevlist (allocating the evlist on first use)
 * and register all sample ids that trail the attr in the event.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.  The evsel is
 * owned by the evlist once added, so it is not freed on the later
 * error path here.
 */
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct evsel *evsel;
	struct evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);

	/* The ids array fills whatever the header size leaves after attr. */
	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
	}

	return 0;
}
Tom Zanussicd19a032010-04-01 23:59:20 -05004051
Jiri Olsaffe777252015-10-25 15:51:36 +01004052int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
4053 union perf_event *event,
Jiri Olsa63503db2019-07-21 13:23:52 +02004054 struct evlist **pevlist)
Jiri Olsaffe777252015-10-25 15:51:36 +01004055{
Jiri Olsa72932372019-08-28 15:57:16 +02004056 struct perf_record_event_update *ev = &event->event_update;
4057 struct perf_record_event_update_scale *ev_scale;
4058 struct perf_record_event_update_cpus *ev_cpus;
Jiri Olsa63503db2019-07-21 13:23:52 +02004059 struct evlist *evlist;
Jiri Olsa32dcd022019-07-21 13:23:51 +02004060 struct evsel *evsel;
Jiri Olsaf8548392019-07-21 13:23:49 +02004061 struct perf_cpu_map *map;
Jiri Olsaffe777252015-10-25 15:51:36 +01004062
4063 if (!pevlist || *pevlist == NULL)
4064 return -EINVAL;
4065
4066 evlist = *pevlist;
4067
4068 evsel = perf_evlist__id2evsel(evlist, ev->id);
4069 if (evsel == NULL)
4070 return -EINVAL;
4071
Jiri Olsaa6e52812015-10-25 15:51:37 +01004072 switch (ev->type) {
4073 case PERF_EVENT_UPDATE__UNIT:
4074 evsel->unit = strdup(ev->data);
Jiri Olsadaeecbc2015-10-25 15:51:38 +01004075 break;
Jiri Olsa802c9042015-10-25 15:51:39 +01004076 case PERF_EVENT_UPDATE__NAME:
4077 evsel->name = strdup(ev->data);
4078 break;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01004079 case PERF_EVENT_UPDATE__SCALE:
Jiri Olsa72932372019-08-28 15:57:16 +02004080 ev_scale = (struct perf_record_event_update_scale *)ev->data;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01004081 evsel->scale = ev_scale->scale;
Arnaldo Carvalho de Melo8434a2e2017-02-08 21:57:22 -03004082 break;
Jiri Olsa86ebb092015-10-25 15:51:40 +01004083 case PERF_EVENT_UPDATE__CPUS:
Jiri Olsa72932372019-08-28 15:57:16 +02004084 ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
Jiri Olsa86ebb092015-10-25 15:51:40 +01004085
4086 map = cpu_map__new_data(&ev_cpus->cpus);
4087 if (map)
Jiri Olsafe1f61b2019-07-21 13:24:38 +02004088 evsel->core.own_cpus = map;
Jiri Olsa86ebb092015-10-25 15:51:40 +01004089 else
4090 pr_err("failed to get event_update cpus\n");
Jiri Olsaa6e52812015-10-25 15:51:37 +01004091 default:
4092 break;
4093 }
4094
Jiri Olsaffe777252015-10-25 15:51:36 +01004095 return 0;
4096}
4097
/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA event into @fd (a pipe)
 * for the tracepoint events in @evlist: emit the fixed-size header event
 * via @process, then stream the tracing data itself, padded to a u64
 * boundary.
 *
 * Returns the aligned size of the tracing data on success, -1 when the
 * tracing data cannot be collected or the padding cannot be written.
 */
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;
	int err __maybe_unused = 0;	/* kept for symmetry; not consulted */

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->core.entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	/* Pad the payload out to a u64 boundary. */
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}
4146
/*
 * Handle a PERF_RECORD_HEADER_TRACING_DATA event: read the tracing data
 * that follows the event header in the session's data file, consume its
 * u64-alignment padding, and (in repipe mode) forward the padding to
 * stdout.
 *
 * Returns the number of bytes consumed (data + padding) on success,
 * -1 on read/repipe errors or when the size read disagrees with the
 * size recorded in the event.
 */
int perf_event__process_tracing_data(struct perf_session *session,
				     union perf_event *event)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	/* The writer padded the data to a u64 boundary; skip the padding. */
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		/* trace_report() already repiped the data; forward the padding too. */
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}
Tom Zanussic7929e42010-04-01 23:59:22 -05004185
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02004186int perf_event__synthesize_build_id(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02004187 struct dso *pos, u16 misc,
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02004188 perf_event__handler_t process,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02004189 struct machine *machine)
Tom Zanussic7929e42010-04-01 23:59:22 -05004190{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02004191 union perf_event ev;
Tom Zanussic7929e42010-04-01 23:59:22 -05004192 size_t len;
4193 int err = 0;
4194
4195 if (!pos->hit)
4196 return err;
4197
4198 memset(&ev, 0, sizeof(ev));
4199
4200 len = pos->long_name_len + 1;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03004201 len = PERF_ALIGN(len, NAME_ALIGN);
Tom Zanussic7929e42010-04-01 23:59:22 -05004202 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
4203 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
4204 ev.build_id.header.misc = misc;
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -03004205 ev.build_id.pid = machine->pid;
Tom Zanussic7929e42010-04-01 23:59:22 -05004206 ev.build_id.header.size = sizeof(ev.build_id) + len;
4207 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
4208
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02004209 err = process(tool, &ev, NULL, machine);
Tom Zanussic7929e42010-04-01 23:59:22 -05004210
4211 return err;
4212}
4213
Jiri Olsa89f16882018-09-13 14:54:03 +02004214int perf_event__process_build_id(struct perf_session *session,
4215 union perf_event *event)
Tom Zanussic7929e42010-04-01 23:59:22 -05004216{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02004217 __event_process_build_id(&event->build_id,
4218 event->build_id.filename,
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08004219 session);
Tom Zanussic7929e42010-04-01 23:59:22 -05004220 return 0;
4221}