blob: fa914ba8cd56aabcf0a1898e8921bdd1c3dbf4ac [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -03002#include <errno.h>
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -03003#include <inttypes.h>
Arnaldo Carvalho de Meloa0675582017-04-17 16:51:59 -03004#include "string2.h"
Arnaldo Carvalho de Melo391e4202017-04-19 18:51:14 -03005#include <sys/param.h>
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02006#include <sys/types.h>
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02007#include <byteswap.h>
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02008#include <unistd.h>
9#include <stdio.h>
10#include <stdlib.h>
Arnaldo Carvalho de Melo03536312017-06-16 12:18:27 -030011#include <linux/compiler.h>
Frederic Weisbecker8671dab2009-11-11 04:51:03 +010012#include <linux/list.h>
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -020013#include <linux/kernel.h>
Robert Richterb1e5a9b2011-12-07 10:02:57 +010014#include <linux/bitops.h>
Arnaldo Carvalho de Melofc6a1722019-06-25 21:33:14 -030015#include <linux/string.h>
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -070016#include <linux/stringify.h>
Arnaldo Carvalho de Melo7f7c5362019-07-04 11:32:27 -030017#include <linux/zalloc.h>
Arnaldo Carvalho de Melo7a8ef4c2017-04-19 20:57:47 -030018#include <sys/stat.h>
Stephane Eranianfbe96f22011-09-30 15:40:40 +020019#include <sys/utsname.h>
Jin Yao60115182017-12-08 21:13:41 +080020#include <linux/time64.h>
Jiri Olsae2091ce2018-03-07 16:50:08 +010021#include <dirent.h>
Song Liu606f9722019-03-11 22:30:43 -070022#include <bpf/libbpf.h>
Jiri Olsa9c3516d2019-07-21 13:24:30 +020023#include <perf/cpumap.h>
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020024
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020025#include "evlist.h"
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -030026#include "evsel.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020027#include "header.h"
Arnaldo Carvalho de Melo98521b32017-04-25 15:45:35 -030028#include "memswap.h"
Frederic Weisbecker03456a12009-10-06 23:36:47 +020029#include "../perf.h"
30#include "trace-event.h"
Arnaldo Carvalho de Melo301a0b02009-12-13 19:50:25 -020031#include "session.h"
Frederic Weisbecker8671dab2009-11-11 04:51:03 +010032#include "symbol.h"
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +010033#include "debug.h"
Stephane Eranianfbe96f22011-09-30 15:40:40 +020034#include "cpumap.h"
Robert Richter50a96672012-08-16 21:10:24 +020035#include "pmu.h"
Jiri Olsa7dbf4dc2012-09-10 18:50:19 +020036#include "vdso.h"
Namhyung Kima1ae5652012-09-24 17:14:59 +090037#include "strbuf.h"
Jiri Olsaebb296c2012-10-27 23:18:28 +020038#include "build-id.h"
Jiri Olsacc9784bd2013-10-15 16:27:34 +020039#include "data.h"
Jiri Olsa720e98b2016-02-16 16:01:43 +010040#include <api/fs/fs.h>
41#include "asm/bug.h"
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -070042#include "tool.h"
Jin Yao60115182017-12-08 21:13:41 +080043#include "time-utils.h"
Jiri Olsae2091ce2018-03-07 16:50:08 +010044#include "units.h"
Jiri Olsa5135d5e2019-02-19 10:58:13 +010045#include "cputopo.h"
Song Liu606f9722019-03-11 22:30:43 -070046#include "bpf-event.h"
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +020047
Arnaldo Carvalho de Melo3052ba52019-06-25 17:27:31 -030048#include <linux/ctype.h>
Arnaldo Carvalho de Melo3d689ed2017-04-17 16:10:49 -030049
/*
 * magic2 = "PERFILE2"
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL; /* "PERFILE2" when read little-endian */
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL; /* byte-swapped __perf_magic2 */

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;
66
/* On-disk layout: one event attr plus the file section holding its ids. */
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};
71
/*
 * Sink/source for feature (de)serialization: either a real file
 * descriptor or a growable in-memory buffer (pipe mode).
 */
struct feat_fd {
	struct perf_header	*ph;	/* header being read/written; gives needs_swap */
	int			fd;
	void			*buf;	/* Either buf != NULL or fd >= 0 */
	ssize_t			offset;	/* next read/write position in buf */
	size_t			size;	/* current capacity of buf */
	struct evsel		*events;
};
80
/* Mark feature 'feat' as present in the header's feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}
85
/* Remove feature 'feat' from the header's feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}
90
/* Return true if feature 'feat' is set in the header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
95
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -070096static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
97{
98 ssize_t ret = writen(ff->fd, buf, size);
99
100 if (ret != (ssize_t)size)
101 return ret < 0 ? (int)ret : -1;
102 return 0;
103}
104
/* Append 'size' bytes to the in-memory buffer, doubling its capacity as needed. */
static int __do_write_buf(struct feat_fd *ff,  const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	/* The serialized feature must still fit in a u16-sized event. */
	if (size + ff->offset > max_size)
		return -E2BIG;

	/*
	 * Grow geometrically until the data fits, then clamp to max_size.
	 * NOTE(review): assumes ff->size is non-zero on entry; a zero-sized
	 * buffer would loop forever here — confirm against the allocators.
	 */
	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}
132
David Carrillo-Cisneros2ff53652017-07-17 21:25:36 -0700133/* Return: 0 if succeded, -ERR if failed. */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700134int do_write(struct feat_fd *ff, const void *buf, size_t size)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +0200135{
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700136 if (!ff->buf)
137 return __do_write_fd(ff, buf, size);
138 return __do_write_buf(ff, buf, size);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +0200139}
140
David Carrillo-Cisneros2ff53652017-07-17 21:25:36 -0700141/* Return: 0 if succeded, -ERR if failed. */
Jiri Olsae2091ce2018-03-07 16:50:08 +0100142static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
143{
144 u64 *p = (u64 *) set;
145 int i, ret;
146
147 ret = do_write(ff, &size, sizeof(size));
148 if (ret < 0)
149 return ret;
150
151 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
152 ret = do_write(ff, p + i, sizeof(*p));
153 if (ret < 0)
154 return ret;
155 }
156
157 return 0;
158}
159
160/* Return: 0 if succeded, -ERR if failed. */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700161int write_padded(struct feat_fd *ff, const void *bf,
162 size_t count, size_t count_aligned)
Arnaldo Carvalho de Melof92cb242010-01-04 16:19:28 -0200163{
164 static const char zero_buf[NAME_ALIGN];
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700165 int err = do_write(ff, bf, count);
Arnaldo Carvalho de Melof92cb242010-01-04 16:19:28 -0200166
167 if (!err)
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700168 err = do_write(ff, zero_buf, count_aligned - count);
Arnaldo Carvalho de Melof92cb242010-01-04 16:19:28 -0200169
170 return err;
171}
172
/* On-disk size of a string as written by do_write_string(): u32 length + padded payload. */
#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
175
David Carrillo-Cisneros2ff53652017-07-17 21:25:36 -0700176/* Return: 0 if succeded, -ERR if failed. */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700177static int do_write_string(struct feat_fd *ff, const char *str)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200178{
179 u32 len, olen;
180 int ret;
181
182 olen = strlen(str) + 1;
Irina Tirdea9ac3e482012-09-11 01:15:01 +0300183 len = PERF_ALIGN(olen, NAME_ALIGN);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200184
185 /* write len, incl. \0 */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700186 ret = do_write(ff, &len, sizeof(len));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200187 if (ret < 0)
188 return ret;
189
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700190 return write_padded(ff, str, olen, len);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200191}
192
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700193static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700194{
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700195 ssize_t ret = readn(ff->fd, addr, size);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700196
197 if (ret != size)
198 return ret < 0 ? (int)ret : -1;
199 return 0;
200}
201
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700202static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
203{
204 if (size > (ssize_t)ff->size - ff->offset)
205 return -1;
206
207 memcpy(addr, ff->buf + ff->offset, size);
208 ff->offset += size;
209
210 return 0;
211
212}
213
214static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
215{
216 if (!ff->buf)
217 return __do_read_fd(ff, addr, size);
218 return __do_read_buf(ff, addr, size);
219}
220
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700221static int do_read_u32(struct feat_fd *ff, u32 *addr)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700222{
223 int ret;
224
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700225 ret = __do_read(ff, addr, sizeof(*addr));
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700226 if (ret)
227 return ret;
228
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700229 if (ff->ph->needs_swap)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700230 *addr = bswap_32(*addr);
231 return 0;
232}
233
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700234static int do_read_u64(struct feat_fd *ff, u64 *addr)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700235{
236 int ret;
237
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700238 ret = __do_read(ff, addr, sizeof(*addr));
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700239 if (ret)
240 return ret;
241
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700242 if (ff->ph->needs_swap)
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -0700243 *addr = bswap_64(*addr);
244 return 0;
245}
246
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700247static char *do_read_string(struct feat_fd *ff)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200248{
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200249 u32 len;
250 char *buf;
251
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700252 if (do_read_u32(ff, &len))
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200253 return NULL;
254
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200255 buf = malloc(len);
256 if (!buf)
257 return NULL;
258
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -0700259 if (!__do_read(ff, buf, len)) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200260 /*
261 * strings are padded by zeroes
262 * thus the actual strlen of buf
263 * may be less than len
264 */
265 return buf;
266 }
267
268 free(buf);
269 return NULL;
270}
271
Jiri Olsae2091ce2018-03-07 16:50:08 +0100272/* Return: 0 if succeded, -ERR if failed. */
273static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
274{
275 unsigned long *set;
276 u64 size, *p;
277 int i, ret;
278
279 ret = do_read_u64(ff, &size);
280 if (ret)
281 return ret;
282
283 set = bitmap_alloc(size);
284 if (!set)
285 return -ENOMEM;
286
Jiri Olsae2091ce2018-03-07 16:50:08 +0100287 p = (u64 *) set;
288
289 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
290 ret = do_read_u64(ff, p + i);
291 if (ret < 0) {
292 free(set);
293 return ret;
294 }
295 }
296
297 *pset = set;
298 *psize = size;
299 return 0;
300}
301
/*
 * Copy the tracefs event format data for all tracepoints in the evlist
 * straight onto the output fd.  Buffer (pipe) mode is unsupported here
 * because read_tracing_data() writes to a real descriptor.
 */
static int write_tracing_data(struct feat_fd *ff,
			      struct evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->core.entries);
}
310
/*
 * Write the build-id table for all DSOs with samples, then cache those
 * build-ids on disk.  Not supported in buffer (pipe) mode.
 */
static int write_build_id(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	/* Collect build-ids (with hits only) before attempting to write. */
	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	/* Best effort: populate ~/.debug so later reports find the DSOs. */
	perf_session__cache_build_ids(session);

	return 0;
}
334
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700335static int write_hostname(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200336 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200337{
338 struct utsname uts;
339 int ret;
340
341 ret = uname(&uts);
342 if (ret < 0)
343 return -1;
344
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700345 return do_write_string(ff, uts.nodename);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200346}
347
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700348static int write_osrelease(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200349 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200350{
351 struct utsname uts;
352 int ret;
353
354 ret = uname(&uts);
355 if (ret < 0)
356 return -1;
357
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700358 return do_write_string(ff, uts.release);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200359}
360
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700361static int write_arch(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200362 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200363{
364 struct utsname uts;
365 int ret;
366
367 ret = uname(&uts);
368 if (ret < 0)
369 return -1;
370
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700371 return do_write_string(ff, uts.machine);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200372}
373
/* Record the perf version this file was written with. */
static int write_version(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}
379
/*
 * Extract the CPU description from /proc/cpuinfo by looking for the line
 * starting with 'cpuinfo_proc' (arch-specific key, e.g. "model name"),
 * normalize its whitespace and write it as a string.
 */
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	/* Find the first line whose prefix matches the search key. */
	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	/* Skip past "key: " to the value portion. */
	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	/* Chop the trailing newline. */
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);
			*p = ' ';
			/* Shift the rest of the string left over the extra spaces. */
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
434
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700435static int write_cpudesc(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200436 struct evlist *evlist __maybe_unused)
Wang Nan493c3032014-10-24 09:45:26 +0800437{
Wang Nan493c3032014-10-24 09:45:26 +0800438 const char *cpuinfo_procs[] = CPUINFO_PROC;
439 unsigned int i;
440
441 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
442 int ret;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700443 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
Wang Nan493c3032014-10-24 09:45:26 +0800444 if (ret >= 0)
445 return ret;
446 }
447 return -1;
448}
449
450
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700451static int write_nrcpus(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200452 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200453{
454 long nr;
455 u32 nrc, nra;
456 int ret;
457
Jan Stancekda8a58b2017-02-17 12:10:26 +0100458 nrc = cpu__max_present_cpu();
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200459
460 nr = sysconf(_SC_NPROCESSORS_ONLN);
461 if (nr < 0)
462 return -1;
463
464 nra = (u32)(nr & UINT_MAX);
465
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700466 ret = do_write(ff, &nrc, sizeof(nrc));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200467 if (ret < 0)
468 return ret;
469
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700470 return do_write(ff, &nra, sizeof(nra));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200471}
472
/*
 * Write the event descriptions: event count, attr struct size, then for
 * each event its attr, id count, display name and the unique ids.
 */
static int write_event_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	struct evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->core.nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->core.attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->core.attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
528
/*
 * Record the command line: the resolved perf binary path followed by the
 * arguments previously captured in perf_env.cmdline_argv.
 */
static int write_cmdline(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	char pbuf[MAXPATHLEN], *buf;
	int i, ret, n;

	/* actual path to perf binary */
	buf = perf_exe(pbuf, MAXPATHLEN);

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}
556
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200557
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700558static int write_cpu_topology(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200559 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200560{
Jiri Olsa5135d5e2019-02-19 10:58:13 +0100561 struct cpu_topology *tp;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200562 u32 i;
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300563 int ret, j;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200564
Jiri Olsa5135d5e2019-02-19 10:58:13 +0100565 tp = cpu_topology__new();
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200566 if (!tp)
567 return -1;
568
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700569 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200570 if (ret < 0)
571 goto done;
572
573 for (i = 0; i < tp->core_sib; i++) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700574 ret = do_write_string(ff, tp->core_siblings[i]);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200575 if (ret < 0)
576 goto done;
577 }
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700578 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200579 if (ret < 0)
580 goto done;
581
582 for (i = 0; i < tp->thread_sib; i++) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700583 ret = do_write_string(ff, tp->thread_siblings[i]);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200584 if (ret < 0)
585 break;
586 }
Kan Liang2bb00d22015-09-01 09:58:12 -0400587
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300588 ret = perf_env__read_cpu_topology_map(&perf_env);
589 if (ret < 0)
590 goto done;
591
592 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700593 ret = do_write(ff, &perf_env.cpu[j].core_id,
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300594 sizeof(perf_env.cpu[j].core_id));
Kan Liang2bb00d22015-09-01 09:58:12 -0400595 if (ret < 0)
596 return ret;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700597 ret = do_write(ff, &perf_env.cpu[j].socket_id,
Arnaldo Carvalho de Meloaa36ddd2015-09-09 10:37:01 -0300598 sizeof(perf_env.cpu[j].socket_id));
Kan Liang2bb00d22015-09-01 09:58:12 -0400599 if (ret < 0)
600 return ret;
601 }
Kan Liangacae8b32019-06-04 15:50:41 -0700602
603 if (!tp->die_sib)
604 goto done;
605
606 ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
607 if (ret < 0)
608 goto done;
609
610 for (i = 0; i < tp->die_sib; i++) {
611 ret = do_write_string(ff, tp->die_siblings[i]);
612 if (ret < 0)
613 goto done;
614 }
615
616 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
617 ret = do_write(ff, &perf_env.cpu[j].die_id,
618 sizeof(perf_env.cpu[j].die_id));
619 if (ret < 0)
620 return ret;
621 }
622
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200623done:
Jiri Olsa5135d5e2019-02-19 10:58:13 +0100624 cpu_topology__delete(tp);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200625 return ret;
626}
627
628
629
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700630static int write_total_mem(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200631 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200632{
633 char *buf = NULL;
634 FILE *fp;
635 size_t len = 0;
636 int ret = -1, n;
637 uint64_t mem;
638
639 fp = fopen("/proc/meminfo", "r");
640 if (!fp)
641 return -1;
642
643 while (getline(&buf, &len, fp) > 0) {
644 ret = strncmp(buf, "MemTotal:", 9);
645 if (!ret)
646 break;
647 }
648 if (!ret) {
649 n = sscanf(buf, "%*s %"PRIu64, &mem);
650 if (n == 1)
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700651 ret = do_write(ff, &mem, sizeof(mem));
Wang Naned307752014-10-16 11:08:29 +0800652 } else
653 ret = -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200654 free(buf);
655 fclose(fp);
656 return ret;
657}
658
/*
 * Write NUMA topology: node count, then per node its id, total and free
 * memory, and the CPU list string.
 */
static int write_numa_topology(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct numa_topology *tp;
	int ret = -1;
	u32 i;

	tp = numa_topology__new();
	if (!tp)
		return -ENOMEM;

	ret = do_write(ff, &tp->nr, sizeof(u32));
	if (ret < 0)
		goto err;

	for (i = 0; i < tp->nr; i++) {
		struct numa_topology_node *n = &tp->nodes[i];

		ret = do_write(ff, &n->node, sizeof(u32));
		if (ret < 0)
			goto err;

		ret = do_write(ff, &n->mem_total, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write(ff, &n->mem_free, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write_string(ff, n->cpus);
		if (ret < 0)
			goto err;
	}

	ret = 0;

err:
	numa_topology__delete(tp);
	return ret;
}
700
/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

static int write_pmu_mappings(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

	/*
	 * Do a first pass to count number of pmu to avoid lseek so this
	 * works in pipe mode as well.
	 */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	/* Second pass: emit <type, name> for every named PMU. */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;

		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}
749
/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		/* Only real groups — a leader with more than one member — are recorded. */
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
795
796/*
Kan Liangf4a07422018-11-21 08:49:39 -0800797 * Return the CPU id as a raw string.
798 *
799 * Each architecture should provide a more precise id string that
800 * can be use to match the architecture's "mapfile".
801 */
802char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
803{
804 return NULL;
805}
806
807/* Return zero when the cpuid from the mapfile.csv matches the
808 * cpuid string generated on this platform.
809 * Otherwise return non-zero.
810 */
811int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
812{
813 regex_t re;
814 regmatch_t pmatch[1];
815 int match;
816
817 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
818 /* Warn unable to generate match particular string. */
819 pr_info("Invalid regular expression %s\n", mapcpuid);
820 return 1;
821 }
822
823 match = !regexec(&re, cpuid, 1, pmatch, 0);
824 regfree(&re);
825 if (match) {
826 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
827
828 /* Verify the entire string matched. */
829 if (match_len == strlen(cpuid))
830 return 0;
831 }
832 return 1;
833}
834
835/*
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200836 * default get_cpuid(): nothing gets recorded
Jiada Wang7a759cd2017-04-09 20:02:37 -0700837 * actual implementation must be in arch/$(SRCARCH)/util/header.c
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200838 */
Rui Teng11d8f872016-07-28 10:05:57 +0800839int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200840{
841 return -1;
842}
843
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700844static int write_cpuid(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200845 struct evlist *evlist __maybe_unused)
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200846{
847 char buffer[64];
848 int ret;
849
850 ret = get_cpuid(buffer, sizeof(buffer));
Jiri Olsaa9aeb872019-02-13 13:32:43 +0100851 if (ret)
852 return -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200853
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700854 return do_write_string(ff, buffer);
Stephane Eranianfbe96f22011-09-30 15:40:40 +0200855}
856
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700857static int write_branch_stack(struct feat_fd *ff __maybe_unused,
Jiri Olsa63503db2019-07-21 13:23:52 +0200858 struct evlist *evlist __maybe_unused)
Stephane Eranian330aa672012-03-08 23:47:46 +0100859{
860 return 0;
861}
862
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700863static int write_auxtrace(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200864 struct evlist *evlist __maybe_unused)
Adrian Hunter4025ea42015-04-09 18:53:41 +0300865{
Adrian Hunter99fa2982015-04-30 17:37:25 +0300866 struct perf_session *session;
867 int err;
868
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -0700869 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
870 return -1;
871
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700872 session = container_of(ff->ph, struct perf_session, header);
Adrian Hunter99fa2982015-04-30 17:37:25 +0300873
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -0700874 err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
Adrian Hunter99fa2982015-04-30 17:37:25 +0300875 if (err < 0)
876 pr_err("Failed to write auxtrace index\n");
877 return err;
Adrian Hunter4025ea42015-04-09 18:53:41 +0300878}
879
Alexey Budankovcf790512018-10-09 17:36:24 +0300880static int write_clockid(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200881 struct evlist *evlist __maybe_unused)
Alexey Budankovcf790512018-10-09 17:36:24 +0300882{
883 return do_write(ff, &ff->ph->env.clockid_res_ns,
884 sizeof(ff->ph->env.clockid_res_ns));
885}
886
Jiri Olsa258031c2019-03-08 14:47:39 +0100887static int write_dir_format(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200888 struct evlist *evlist __maybe_unused)
Jiri Olsa258031c2019-03-08 14:47:39 +0100889{
890 struct perf_session *session;
891 struct perf_data *data;
892
893 session = container_of(ff->ph, struct perf_session, header);
894 data = session->data;
895
896 if (WARN_ON(!perf_data__is_dir(data)))
897 return -1;
898
899 return do_write(ff, &data->dir.version, sizeof(data->dir.version));
900}
901
Song Liu606f9722019-03-11 22:30:43 -0700902#ifdef HAVE_LIBBPF_SUPPORT
903static int write_bpf_prog_info(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200904 struct evlist *evlist __maybe_unused)
Song Liu606f9722019-03-11 22:30:43 -0700905{
906 struct perf_env *env = &ff->ph->env;
907 struct rb_root *root;
908 struct rb_node *next;
909 int ret;
910
911 down_read(&env->bpf_progs.lock);
912
913 ret = do_write(ff, &env->bpf_progs.infos_cnt,
914 sizeof(env->bpf_progs.infos_cnt));
915 if (ret < 0)
916 goto out;
917
918 root = &env->bpf_progs.infos;
919 next = rb_first(root);
920 while (next) {
921 struct bpf_prog_info_node *node;
922 size_t len;
923
924 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
925 next = rb_next(&node->rb_node);
926 len = sizeof(struct bpf_prog_info_linear) +
927 node->info_linear->data_len;
928
929 /* before writing to file, translate address to offset */
930 bpf_program__bpil_addr_to_offs(node->info_linear);
931 ret = do_write(ff, node->info_linear, len);
932 /*
933 * translate back to address even when do_write() fails,
934 * so that this function never changes the data.
935 */
936 bpf_program__bpil_offs_to_addr(node->info_linear);
937 if (ret < 0)
938 goto out;
939 }
940out:
941 up_read(&env->bpf_progs.lock);
942 return ret;
943}
944#else // HAVE_LIBBPF_SUPPORT
945static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
Jiri Olsa63503db2019-07-21 13:23:52 +0200946 struct evlist *evlist __maybe_unused)
Song Liu606f9722019-03-11 22:30:43 -0700947{
948 return 0;
949}
950#endif // HAVE_LIBBPF_SUPPORT
951
Song Liua70a112312019-03-11 22:30:45 -0700952static int write_bpf_btf(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +0200953 struct evlist *evlist __maybe_unused)
Song Liua70a112312019-03-11 22:30:45 -0700954{
955 struct perf_env *env = &ff->ph->env;
956 struct rb_root *root;
957 struct rb_node *next;
958 int ret;
959
960 down_read(&env->bpf_progs.lock);
961
962 ret = do_write(ff, &env->bpf_progs.btfs_cnt,
963 sizeof(env->bpf_progs.btfs_cnt));
964
965 if (ret < 0)
966 goto out;
967
968 root = &env->bpf_progs.btfs;
969 next = rb_first(root);
970 while (next) {
971 struct btf_node *node;
972
973 node = rb_entry(next, struct btf_node, rb_node);
974 next = rb_next(&node->rb_node);
975 ret = do_write(ff, &node->id,
976 sizeof(u32) * 2 + node->data_size);
977 if (ret < 0)
978 goto out;
979 }
980out:
981 up_read(&env->bpf_progs.lock);
982 return ret;
983}
984
Jiri Olsa720e98b2016-02-16 16:01:43 +0100985static int cpu_cache_level__sort(const void *a, const void *b)
986{
987 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
988 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
989
990 return cache_a->level - cache_b->level;
991}
992
993static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
994{
995 if (a->level != b->level)
996 return false;
997
998 if (a->line_size != b->line_size)
999 return false;
1000
1001 if (a->sets != b->sets)
1002 return false;
1003
1004 if (a->ways != b->ways)
1005 return false;
1006
1007 if (strcmp(a->type, b->type))
1008 return false;
1009
1010 if (strcmp(a->size, b->size))
1011 return false;
1012
1013 if (strcmp(a->map, b->map))
1014 return false;
1015
1016 return true;
1017}
1018
1019static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
1020{
1021 char path[PATH_MAX], file[PATH_MAX];
1022 struct stat st;
1023 size_t len;
1024
1025 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
1026 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
1027
1028 if (stat(file, &st))
1029 return 1;
1030
1031 scnprintf(file, PATH_MAX, "%s/level", path);
1032 if (sysfs__read_int(file, (int *) &cache->level))
1033 return -1;
1034
1035 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
1036 if (sysfs__read_int(file, (int *) &cache->line_size))
1037 return -1;
1038
1039 scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
1040 if (sysfs__read_int(file, (int *) &cache->sets))
1041 return -1;
1042
1043 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
1044 if (sysfs__read_int(file, (int *) &cache->ways))
1045 return -1;
1046
1047 scnprintf(file, PATH_MAX, "%s/type", path);
1048 if (sysfs__read_str(file, &cache->type, &len))
1049 return -1;
1050
1051 cache->type[len] = 0;
Arnaldo Carvalho de Melo13c230a2019-06-26 12:13:13 -03001052 cache->type = strim(cache->type);
Jiri Olsa720e98b2016-02-16 16:01:43 +01001053
1054 scnprintf(file, PATH_MAX, "%s/size", path);
1055 if (sysfs__read_str(file, &cache->size, &len)) {
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -03001056 zfree(&cache->type);
Jiri Olsa720e98b2016-02-16 16:01:43 +01001057 return -1;
1058 }
1059
1060 cache->size[len] = 0;
Arnaldo Carvalho de Melo13c230a2019-06-26 12:13:13 -03001061 cache->size = strim(cache->size);
Jiri Olsa720e98b2016-02-16 16:01:43 +01001062
1063 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
1064 if (sysfs__read_str(file, &cache->map, &len)) {
Arnaldo Carvalho de Melod8f9da22019-07-04 12:06:20 -03001065 zfree(&cache->map);
1066 zfree(&cache->type);
Jiri Olsa720e98b2016-02-16 16:01:43 +01001067 return -1;
1068 }
1069
1070 cache->map[len] = 0;
Arnaldo Carvalho de Melo13c230a2019-06-26 12:13:13 -03001071 cache->map = strim(cache->map);
Jiri Olsa720e98b2016-02-16 16:01:43 +01001072 return 0;
1073}
1074
/* Print one cache level as "L<level> <type> <size> [<cpu map>]". */
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
1079
/*
 * Collect the unique cache level descriptions of all configured cpus
 * into @caches (capacity @size); the number found is returned via
 * @cntp.  Descriptions shared between cpus are collapsed so each
 * distinct cache appears once.  Returns 0 on success or a negative
 * error from cpu_cache_level__read().
 */
static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		/* 10 is an arbitrary upper bound on cache levels per cpu. */
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			/* err == 1: no cache at this level, move to next cpu. */
			if (err == 1)
				break;

			/* Skip descriptions already collected from another cpu. */
			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}
1123
Kyle Meyer9f94c7f2019-06-20 14:36:30 -05001124#define MAX_CACHES (MAX_NR_CPUS * 4)
Jiri Olsa720e98b2016-02-16 16:01:43 +01001125
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07001126static int write_cache(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +02001127 struct evlist *evlist __maybe_unused)
Jiri Olsa720e98b2016-02-16 16:01:43 +01001128{
1129 struct cpu_cache_level caches[MAX_CACHES];
1130 u32 cnt = 0, i, version = 1;
1131 int ret;
1132
1133 ret = build_caches(caches, MAX_CACHES, &cnt);
1134 if (ret)
1135 goto out;
1136
1137 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1138
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07001139 ret = do_write(ff, &version, sizeof(u32));
Jiri Olsa720e98b2016-02-16 16:01:43 +01001140 if (ret < 0)
1141 goto out;
1142
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07001143 ret = do_write(ff, &cnt, sizeof(u32));
Jiri Olsa720e98b2016-02-16 16:01:43 +01001144 if (ret < 0)
1145 goto out;
1146
1147 for (i = 0; i < cnt; i++) {
1148 struct cpu_cache_level *c = &caches[i];
1149
1150 #define _W(v) \
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07001151 ret = do_write(ff, &c->v, sizeof(u32)); \
Jiri Olsa720e98b2016-02-16 16:01:43 +01001152 if (ret < 0) \
1153 goto out;
1154
1155 _W(level)
1156 _W(line_size)
1157 _W(sets)
1158 _W(ways)
1159 #undef _W
1160
1161 #define _W(v) \
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07001162 ret = do_write_string(ff, (const char *) c->v); \
Jiri Olsa720e98b2016-02-16 16:01:43 +01001163 if (ret < 0) \
1164 goto out;
1165
1166 _W(type)
1167 _W(size)
1168 _W(map)
1169 #undef _W
1170 }
1171
1172out:
1173 for (i = 0; i < cnt; i++)
1174 cpu_cache_level__free(&caches[i]);
1175 return ret;
1176}
1177
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07001178static int write_stat(struct feat_fd *ff __maybe_unused,
Jiri Olsa63503db2019-07-21 13:23:52 +02001179 struct evlist *evlist __maybe_unused)
Jiri Olsaffa517a2015-10-25 15:51:43 +01001180{
1181 return 0;
1182}
1183
Jin Yao60115182017-12-08 21:13:41 +08001184static int write_sample_time(struct feat_fd *ff,
Jiri Olsa63503db2019-07-21 13:23:52 +02001185 struct evlist *evlist)
Jin Yao60115182017-12-08 21:13:41 +08001186{
1187 int ret;
1188
1189 ret = do_write(ff, &evlist->first_sample_time,
1190 sizeof(evlist->first_sample_time));
1191 if (ret < 0)
1192 return ret;
1193
1194 return do_write(ff, &evlist->last_sample_time,
1195 sizeof(evlist->last_sample_time));
1196}
1197
Jiri Olsae2091ce2018-03-07 16:50:08 +01001198
1199static int memory_node__read(struct memory_node *n, unsigned long idx)
1200{
1201 unsigned int phys, size = 0;
1202 char path[PATH_MAX];
1203 struct dirent *ent;
1204 DIR *dir;
1205
1206#define for_each_memory(mem, dir) \
1207 while ((ent = readdir(dir))) \
1208 if (strcmp(ent->d_name, ".") && \
1209 strcmp(ent->d_name, "..") && \
1210 sscanf(ent->d_name, "memory%u", &mem) == 1)
1211
1212 scnprintf(path, PATH_MAX,
1213 "%s/devices/system/node/node%lu",
1214 sysfs__mountpoint(), idx);
1215
1216 dir = opendir(path);
1217 if (!dir) {
1218 pr_warning("failed: cant' open memory sysfs data\n");
1219 return -1;
1220 }
1221
1222 for_each_memory(phys, dir) {
1223 size = max(phys, size);
1224 }
1225
1226 size++;
1227
1228 n->set = bitmap_alloc(size);
1229 if (!n->set) {
1230 closedir(dir);
1231 return -ENOMEM;
1232 }
1233
Jiri Olsae2091ce2018-03-07 16:50:08 +01001234 n->node = idx;
1235 n->size = size;
1236
1237 rewinddir(dir);
1238
1239 for_each_memory(phys, dir) {
1240 set_bit(phys, n->set);
1241 }
1242
1243 closedir(dir);
1244 return 0;
1245}
1246
1247static int memory_node__sort(const void *a, const void *b)
1248{
1249 const struct memory_node *na = a;
1250 const struct memory_node *nb = b;
1251
1252 return na->node - nb->node;
1253}
1254
1255static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1256{
1257 char path[PATH_MAX];
1258 struct dirent *ent;
1259 DIR *dir;
1260 u64 cnt = 0;
1261 int ret = 0;
1262
1263 scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1264 sysfs__mountpoint());
1265
1266 dir = opendir(path);
1267 if (!dir) {
Thomas Richter4f75f1cb2018-04-12 15:32:46 +02001268 pr_debug2("%s: could't read %s, does this arch have topology information?\n",
1269 __func__, path);
Jiri Olsae2091ce2018-03-07 16:50:08 +01001270 return -1;
1271 }
1272
1273 while (!ret && (ent = readdir(dir))) {
1274 unsigned int idx;
1275 int r;
1276
1277 if (!strcmp(ent->d_name, ".") ||
1278 !strcmp(ent->d_name, ".."))
1279 continue;
1280
1281 r = sscanf(ent->d_name, "node%u", &idx);
1282 if (r != 1)
1283 continue;
1284
1285 if (WARN_ONCE(cnt >= size,
1286 "failed to write MEM_TOPOLOGY, way too many nodes\n"))
1287 return -1;
1288
1289 ret = memory_node__read(&nodes[cnt++], idx);
1290 }
1291
1292 *cntp = cnt;
1293 closedir(dir);
1294
1295 if (!ret)
1296 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1297
1298 return ret;
1299}
1300
1301#define MAX_MEMORY_NODES 2000
1302
1303/*
1304 * The MEM_TOPOLOGY holds physical memory map for every
1305 * node in system. The format of data is as follows:
1306 *
1307 * 0 - version | for future changes
1308 * 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
1309 * 16 - count | number of nodes
1310 *
1311 * For each node we store map of physical indexes for
1312 * each node:
1313 *
1314 * 32 - node id | node index
1315 * 40 - size | size of bitmap
1316 * 48 - bitmap | bitmap of memory indexes that belongs to node
1317 */
1318static int write_mem_topology(struct feat_fd *ff __maybe_unused,
Jiri Olsa63503db2019-07-21 13:23:52 +02001319 struct evlist *evlist __maybe_unused)
Jiri Olsae2091ce2018-03-07 16:50:08 +01001320{
1321 static struct memory_node nodes[MAX_MEMORY_NODES];
1322 u64 bsize, version = 1, i, nr;
1323 int ret;
1324
1325 ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
1326 (unsigned long long *) &bsize);
1327 if (ret)
1328 return ret;
1329
1330 ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
1331 if (ret)
1332 return ret;
1333
1334 ret = do_write(ff, &version, sizeof(version));
1335 if (ret < 0)
1336 goto out;
1337
1338 ret = do_write(ff, &bsize, sizeof(bsize));
1339 if (ret < 0)
1340 goto out;
1341
1342 ret = do_write(ff, &nr, sizeof(nr));
1343 if (ret < 0)
1344 goto out;
1345
1346 for (i = 0; i < nr; i++) {
1347 struct memory_node *n = &nodes[i];
1348
1349 #define _W(v) \
1350 ret = do_write(ff, &n->v, sizeof(n->v)); \
1351 if (ret < 0) \
1352 goto out;
1353
1354 _W(node)
1355 _W(size)
1356
1357 #undef _W
1358
1359 ret = do_write_bitmap(ff, n->set, n->size);
1360 if (ret < 0)
1361 goto out;
1362 }
1363
1364out:
1365 return ret;
1366}
1367
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001368static int write_compressed(struct feat_fd *ff __maybe_unused,
Jiri Olsa63503db2019-07-21 13:23:52 +02001369 struct evlist *evlist __maybe_unused)
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001370{
1371 int ret;
1372
1373 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
1374 if (ret)
1375 return ret;
1376
1377 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
1378 if (ret)
1379 return ret;
1380
1381 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
1382 if (ret)
1383 return ret;
1384
1385 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
1386 if (ret)
1387 return ret;
1388
1389 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
1390}
1391
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001392static void print_hostname(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001393{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001394 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001395}
1396
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001397static void print_osrelease(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001398{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001399 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001400}
1401
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001402static void print_arch(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001403{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001404 fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001405}
1406
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001407static void print_cpudesc(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001408{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001409 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001410}
1411
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001412static void print_nrcpus(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001413{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001414 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1415 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001416}
1417
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001418static void print_version(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001419{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001420 fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001421}
1422
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001423static void print_cmdline(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001424{
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001425 int nr, i;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001426
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001427 nr = ff->ph->env.nr_cmdline;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001428
1429 fprintf(fp, "# cmdline : ");
1430
Alexey Budankovf92da712018-06-04 09:50:56 +03001431 for (i = 0; i < nr; i++) {
1432 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1433 if (!argv_i) {
1434 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1435 } else {
1436 char *mem = argv_i;
1437 do {
1438 char *quote = strchr(argv_i, '\'');
1439 if (!quote)
1440 break;
1441 *quote++ = '\0';
1442 fprintf(fp, "%s\\\'", argv_i);
1443 argv_i = quote;
1444 } while (1);
1445 fprintf(fp, "%s ", argv_i);
1446 free(mem);
1447 }
1448 }
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001449 fputc('\n', fp);
1450}
1451
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001452static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001453{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001454 struct perf_header *ph = ff->ph;
1455 int cpu_nr = ph->env.nr_cpus_avail;
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001456 int nr, i;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001457 char *str;
1458
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001459 nr = ph->env.nr_sibling_cores;
1460 str = ph->env.sibling_cores;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001461
1462 for (i = 0; i < nr; i++) {
Kan Liange05a8992019-06-04 15:50:43 -07001463 fprintf(fp, "# sibling sockets : %s\n", str);
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001464 str += strlen(str) + 1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001465 }
1466
Kan Liangacae8b32019-06-04 15:50:41 -07001467 if (ph->env.nr_sibling_dies) {
1468 nr = ph->env.nr_sibling_dies;
1469 str = ph->env.sibling_dies;
1470
1471 for (i = 0; i < nr; i++) {
1472 fprintf(fp, "# sibling dies : %s\n", str);
1473 str += strlen(str) + 1;
1474 }
1475 }
1476
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001477 nr = ph->env.nr_sibling_threads;
1478 str = ph->env.sibling_threads;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001479
1480 for (i = 0; i < nr; i++) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001481 fprintf(fp, "# sibling threads : %s\n", str);
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001482 str += strlen(str) + 1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001483 }
Kan Liang2bb00d22015-09-01 09:58:12 -04001484
Kan Liangacae8b32019-06-04 15:50:41 -07001485 if (ph->env.nr_sibling_dies) {
1486 if (ph->env.cpu != NULL) {
1487 for (i = 0; i < cpu_nr; i++)
1488 fprintf(fp, "# CPU %d: Core ID %d, "
1489 "Die ID %d, Socket ID %d\n",
1490 i, ph->env.cpu[i].core_id,
1491 ph->env.cpu[i].die_id,
1492 ph->env.cpu[i].socket_id);
1493 } else
1494 fprintf(fp, "# Core ID, Die ID and Socket ID "
1495 "information is not available\n");
1496 } else {
1497 if (ph->env.cpu != NULL) {
1498 for (i = 0; i < cpu_nr; i++)
1499 fprintf(fp, "# CPU %d: Core ID %d, "
1500 "Socket ID %d\n",
1501 i, ph->env.cpu[i].core_id,
1502 ph->env.cpu[i].socket_id);
1503 } else
1504 fprintf(fp, "# Core ID and Socket ID "
1505 "information is not available\n");
1506 }
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001507}
1508
Alexey Budankovcf790512018-10-09 17:36:24 +03001509static void print_clockid(struct feat_fd *ff, FILE *fp)
1510{
1511 fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
1512 ff->ph->env.clockid_res_ns * 1000);
1513}
1514
Jiri Olsa258031c2019-03-08 14:47:39 +01001515static void print_dir_format(struct feat_fd *ff, FILE *fp)
1516{
1517 struct perf_session *session;
1518 struct perf_data *data;
1519
1520 session = container_of(ff->ph, struct perf_session, header);
1521 data = session->data;
1522
1523 fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
1524}
1525
Song Liu606f9722019-03-11 22:30:43 -07001526static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
1527{
1528 struct perf_env *env = &ff->ph->env;
1529 struct rb_root *root;
1530 struct rb_node *next;
1531
1532 down_read(&env->bpf_progs.lock);
1533
1534 root = &env->bpf_progs.infos;
1535 next = rb_first(root);
1536
1537 while (next) {
1538 struct bpf_prog_info_node *node;
1539
1540 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1541 next = rb_next(&node->rb_node);
Song Liuf8dfeae2019-03-19 09:54:54 -07001542
1543 bpf_event__print_bpf_prog_info(&node->info_linear->info,
1544 env, fp);
Song Liu606f9722019-03-11 22:30:43 -07001545 }
1546
1547 up_read(&env->bpf_progs.lock);
1548}
1549
Song Liua70a112312019-03-11 22:30:45 -07001550static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
1551{
1552 struct perf_env *env = &ff->ph->env;
1553 struct rb_root *root;
1554 struct rb_node *next;
1555
1556 down_read(&env->bpf_progs.lock);
1557
1558 root = &env->bpf_progs.btfs;
1559 next = rb_first(root);
1560
1561 while (next) {
1562 struct btf_node *node;
1563
1564 node = rb_entry(next, struct btf_node, rb_node);
1565 next = rb_next(&node->rb_node);
1566 fprintf(fp, "# btf info of id %u\n", node->id);
1567 }
1568
1569 up_read(&env->bpf_progs.lock);
1570}
1571
Jiri Olsa32dcd022019-07-21 13:23:51 +02001572static void free_event_desc(struct evsel *events)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001573{
Jiri Olsa32dcd022019-07-21 13:23:51 +02001574 struct evsel *evsel;
Robert Richter4e1b9c62012-08-16 21:10:22 +02001575
1576 if (!events)
1577 return;
1578
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001579 for (evsel = events; evsel->core.attr.size; evsel++) {
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -03001580 zfree(&evsel->name);
1581 zfree(&evsel->id);
Robert Richter4e1b9c62012-08-16 21:10:22 +02001582 }
1583
1584 free(events);
1585}
1586
Jiri Olsa32dcd022019-07-21 13:23:51 +02001587static struct evsel *read_event_desc(struct feat_fd *ff)
Robert Richter4e1b9c62012-08-16 21:10:22 +02001588{
Jiri Olsa32dcd022019-07-21 13:23:51 +02001589 struct evsel *evsel, *events = NULL;
Robert Richter4e1b9c62012-08-16 21:10:22 +02001590 u64 *id;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001591 void *buf = NULL;
Stephane Eranian62db9062012-02-09 23:21:07 +01001592 u32 nre, sz, nr, i, j;
Stephane Eranian62db9062012-02-09 23:21:07 +01001593 size_t msz;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001594
1595 /* number of events */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001596 if (do_read_u32(ff, &nre))
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001597 goto error;
1598
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001599 if (do_read_u32(ff, &sz))
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001600 goto error;
1601
Stephane Eranian62db9062012-02-09 23:21:07 +01001602 /* buffer to hold on file attr struct */
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001603 buf = malloc(sz);
1604 if (!buf)
1605 goto error;
1606
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001607 /* the last event terminates with evsel->core.attr.size == 0: */
Robert Richter4e1b9c62012-08-16 21:10:22 +02001608 events = calloc(nre + 1, sizeof(*events));
1609 if (!events)
1610 goto error;
1611
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001612 msz = sizeof(evsel->core.attr);
Jiri Olsa9fafd982012-03-20 19:15:39 +01001613 if (sz < msz)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001614 msz = sz;
1615
Robert Richter4e1b9c62012-08-16 21:10:22 +02001616 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1617 evsel->idx = i;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001618
Stephane Eranian62db9062012-02-09 23:21:07 +01001619 /*
1620 * must read entire on-file attr struct to
1621 * sync up with layout.
1622 */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001623 if (__do_read(ff, buf, sz))
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001624 goto error;
1625
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001626 if (ff->ph->needs_swap)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001627 perf_event__attr_swap(buf);
1628
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001629 memcpy(&evsel->core.attr, buf, msz);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001630
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001631 if (do_read_u32(ff, &nr))
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001632 goto error;
1633
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001634 if (ff->ph->needs_swap)
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001635 evsel->needs_swap = true;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001636
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001637 evsel->name = do_read_string(ff);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07001638 if (!evsel->name)
1639 goto error;
Robert Richter4e1b9c62012-08-16 21:10:22 +02001640
1641 if (!nr)
1642 continue;
1643
1644 id = calloc(nr, sizeof(*id));
1645 if (!id)
1646 goto error;
1647 evsel->ids = nr;
1648 evsel->id = id;
1649
1650 for (j = 0 ; j < nr; j++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001651 if (do_read_u64(ff, id))
Robert Richter4e1b9c62012-08-16 21:10:22 +02001652 goto error;
Robert Richter4e1b9c62012-08-16 21:10:22 +02001653 id++;
1654 }
1655 }
1656out:
Arnaldo Carvalho de Melo04662522013-12-26 17:41:15 -03001657 free(buf);
Robert Richter4e1b9c62012-08-16 21:10:22 +02001658 return events;
1659error:
Markus Elfring4cc97612015-06-25 17:12:32 +02001660 free_event_desc(events);
Robert Richter4e1b9c62012-08-16 21:10:22 +02001661 events = NULL;
1662 goto out;
1663}
1664
Peter Zijlstra2c5e8c52015-04-07 11:09:54 +02001665static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
Arnaldo Carvalho de Melo03536312017-06-16 12:18:27 -03001666 void *priv __maybe_unused)
Peter Zijlstra2c5e8c52015-04-07 11:09:54 +02001667{
1668 return fprintf(fp, ", %s = %s", name, val);
1669}
1670
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001671static void print_event_desc(struct feat_fd *ff, FILE *fp)
Robert Richter4e1b9c62012-08-16 21:10:22 +02001672{
Jiri Olsa32dcd022019-07-21 13:23:51 +02001673 struct evsel *evsel, *events;
Robert Richter4e1b9c62012-08-16 21:10:22 +02001674 u32 j;
1675 u64 *id;
1676
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07001677 if (ff->events)
1678 events = ff->events;
1679 else
1680 events = read_event_desc(ff);
1681
Robert Richter4e1b9c62012-08-16 21:10:22 +02001682 if (!events) {
1683 fprintf(fp, "# event desc: not available or unable to read\n");
1684 return;
1685 }
1686
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001687 for (evsel = events; evsel->core.attr.size; evsel++) {
Robert Richter4e1b9c62012-08-16 21:10:22 +02001688 fprintf(fp, "# event : name = %s, ", evsel->name);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001689
Robert Richter4e1b9c62012-08-16 21:10:22 +02001690 if (evsel->ids) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001691 fprintf(fp, ", id = {");
Robert Richter4e1b9c62012-08-16 21:10:22 +02001692 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1693 if (j)
1694 fputc(',', fp);
1695 fprintf(fp, " %"PRIu64, *id);
1696 }
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001697 fprintf(fp, " }");
Robert Richter4e1b9c62012-08-16 21:10:22 +02001698 }
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001699
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001700 perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);
Robert Richter4e1b9c62012-08-16 21:10:22 +02001701
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001702 fputc('\n', fp);
1703 }
Robert Richter4e1b9c62012-08-16 21:10:22 +02001704
1705 free_event_desc(events);
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07001706 ff->events = NULL;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001707}
1708
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001709static void print_total_mem(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001710{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001711 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001712}
1713
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001714static void print_numa_topology(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001715{
Jiri Olsac60da222016-07-04 14:16:20 +02001716 int i;
1717 struct numa_node *n;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001718
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001719 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1720 n = &ff->ph->env.numa_nodes[i];
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001721
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001722 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1723 " free = %"PRIu64" kB\n",
Jiri Olsac60da222016-07-04 14:16:20 +02001724 n->node, n->mem_total, n->mem_free);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001725
Jiri Olsac60da222016-07-04 14:16:20 +02001726 fprintf(fp, "# node%u cpu list : ", n->node);
1727 cpu_map__fprintf(n->map, fp);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001728 }
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001729}
1730
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001731static void print_cpuid(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001732{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001733 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001734}
1735
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001736static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
Stephane Eranian330aa672012-03-08 23:47:46 +01001737{
1738 fprintf(fp, "# contains samples with branch stack\n");
1739}
1740
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001741static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
Adrian Hunter4025ea42015-04-09 18:53:41 +03001742{
1743 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1744}
1745
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001746static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
Jiri Olsaffa517a2015-10-25 15:51:43 +01001747{
1748 fprintf(fp, "# contains stat data\n");
1749}
1750
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001751static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
Jiri Olsa720e98b2016-02-16 16:01:43 +01001752{
1753 int i;
1754
1755 fprintf(fp, "# CPU cache info:\n");
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001756 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
Jiri Olsa720e98b2016-02-16 16:01:43 +01001757 fprintf(fp, "# ");
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001758 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
Jiri Olsa720e98b2016-02-16 16:01:43 +01001759 }
1760}
1761
Alexey Budankov42e1fd82019-03-18 20:41:33 +03001762static void print_compressed(struct feat_fd *ff, FILE *fp)
1763{
1764 fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
1765 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
1766 ff->ph->env.comp_level, ff->ph->env.comp_ratio);
1767}
1768
/*
 * Print the PMU_MAPPINGS feature.
 *
 * env.pmu_mappings holds nr_pmu_mappings entries packed back to back,
 * each of the form "type:name\0" (see process_pmu_mappings(), which
 * builds the buffer with strbuf_addf("%u:%s") plus a NUL).  Each entry
 * is printed as "name = type".
 *
 * Note the tail control flow: after the while loop completes normally
 * pmu_num is 0, so the "if (!pmu_num) return" always fires; the error
 * message below it is reachable only via the goto on a malformed entry.
 */
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ff->ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ff->ph->env.pmu_mappings;

	while (pmu_num) {
		/* parse the numeric type up to the ':' separator */
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		/* str now points at the PMU name part of the entry */
		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		delimiter = ", ";
		str += strlen(str) + 1;	/* skip name and its NUL terminator */
		pmu_num--;
	}

	fprintf(fp, "\n");

	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}
1804
/*
 * Print the GROUP_DESC feature: event groups as "# group: name{leader,member,...}".
 *
 * Relies on group members following their leader in evlist order: when a
 * leader with more than one member is seen, 'nr' is set to the number of
 * remaining members and counts down as subsequent evsels are printed,
 * closing the brace when it reaches zero.
 */
static void print_group_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct evsel *evsel;
	u32 nr = 0;		/* members still to print for the open group */

	session = container_of(ff->ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			/* group_name may be NULL for anonymous groups */
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}
1828
Jin Yao60115182017-12-08 21:13:41 +08001829static void print_sample_time(struct feat_fd *ff, FILE *fp)
1830{
1831 struct perf_session *session;
1832 char time_buf[32];
1833 double d;
1834
1835 session = container_of(ff->ph, struct perf_session, header);
1836
1837 timestamp__scnprintf_usec(session->evlist->first_sample_time,
1838 time_buf, sizeof(time_buf));
1839 fprintf(fp, "# time of first sample : %s\n", time_buf);
1840
1841 timestamp__scnprintf_usec(session->evlist->last_sample_time,
1842 time_buf, sizeof(time_buf));
1843 fprintf(fp, "# time of last sample : %s\n", time_buf);
1844
1845 d = (double)(session->evlist->last_sample_time -
1846 session->evlist->first_sample_time) / NSEC_PER_MSEC;
1847
1848 fprintf(fp, "# sample duration : %10.3f ms\n", d);
1849}
1850
Jiri Olsae2091ce2018-03-07 16:50:08 +01001851static void memory_node__fprintf(struct memory_node *n,
1852 unsigned long long bsize, FILE *fp)
1853{
1854 char buf_map[100], buf_size[50];
1855 unsigned long long size;
1856
1857 size = bsize * bitmap_weight(n->set, n->size);
1858 unit_number__scnprintf(buf_size, 50, size);
1859
1860 bitmap_scnprintf(n->set, n->size, buf_map, 100);
1861 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
1862}
1863
1864static void print_mem_topology(struct feat_fd *ff, FILE *fp)
1865{
1866 struct memory_node *nodes;
1867 int i, nr;
1868
1869 nodes = ff->ph->env.memory_nodes;
1870 nr = ff->ph->env.nr_memory_nodes;
1871
1872 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
1873 nr, ff->ph->env.memory_bsize);
1874
1875 for (i = 0; i < nr; i++) {
1876 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
1877 }
1878}
1879
/*
 * Register one build-id record with the session: find (or create) the
 * machine for bev->pid, classify the dso from the record's cpumode, and
 * attach the build id to the dso so symbols can later be resolved from
 * the right binary.
 *
 * Returns 0 on success, -1 if the machine can't be created or the
 * cpumode is unrecognized.
 */
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	/* host vs guest machine is keyed by the pid recorded in the event */
	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		/*
		 * Kernel-side dsos may actually be modules: parse the
		 * filename as a kmod path and attach module info if so,
		 * otherwise mark the dso with the kernel type.
		 */
		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		/* drop the reference taken by machine__findnew_dso() */
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}
1939
1940static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1941 int input, u64 offset, u64 size)
1942{
1943 struct perf_session *session = container_of(header, struct perf_session, header);
1944 struct {
1945 struct perf_event_header header;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03001946 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
Robert Richter08d95bd2012-02-10 15:41:55 +01001947 char filename[0];
1948 } old_bev;
1949 struct build_id_event bev;
1950 char filename[PATH_MAX];
1951 u64 limit = offset + size;
1952
1953 while (offset < limit) {
1954 ssize_t len;
1955
Namhyung Kim5323f602012-12-17 15:38:54 +09001956 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
Robert Richter08d95bd2012-02-10 15:41:55 +01001957 return -1;
1958
1959 if (header->needs_swap)
1960 perf_event_header__bswap(&old_bev.header);
1961
1962 len = old_bev.header.size - sizeof(old_bev);
Namhyung Kim5323f602012-12-17 15:38:54 +09001963 if (readn(input, filename, len) != len)
Robert Richter08d95bd2012-02-10 15:41:55 +01001964 return -1;
1965
1966 bev.header = old_bev.header;
1967
1968 /*
1969 * As the pid is the missing value, we need to fill
1970 * it properly. The header.misc value give us nice hint.
1971 */
1972 bev.pid = HOST_KERNEL_ID;
1973 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1974 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1975 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1976
1977 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1978 __event_process_build_id(&bev, filename, session);
1979
1980 offset += bev.header.size;
1981 }
1982
1983 return 0;
1984}
1985
1986static int perf_header__read_build_ids(struct perf_header *header,
1987 int input, u64 offset, u64 size)
1988{
1989 struct perf_session *session = container_of(header, struct perf_session, header);
1990 struct build_id_event bev;
1991 char filename[PATH_MAX];
1992 u64 limit = offset + size, orig_offset = offset;
1993 int err = -1;
1994
1995 while (offset < limit) {
1996 ssize_t len;
1997
Namhyung Kim5323f602012-12-17 15:38:54 +09001998 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
Robert Richter08d95bd2012-02-10 15:41:55 +01001999 goto out;
2000
2001 if (header->needs_swap)
2002 perf_event_header__bswap(&bev.header);
2003
2004 len = bev.header.size - sizeof(bev);
Namhyung Kim5323f602012-12-17 15:38:54 +09002005 if (readn(input, filename, len) != len)
Robert Richter08d95bd2012-02-10 15:41:55 +01002006 goto out;
2007 /*
2008 * The a1645ce1 changeset:
2009 *
2010 * "perf: 'perf kvm' tool for monitoring guest performance from host"
2011 *
2012 * Added a field to struct build_id_event that broke the file
2013 * format.
2014 *
2015 * Since the kernel build-id is the first entry, process the
2016 * table using the old format if the well known
2017 * '[kernel.kallsyms]' string for the kernel build-id has the
2018 * first 4 characters chopped off (where the pid_t sits).
2019 */
2020 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2021 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
2022 return -1;
2023 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
2024 }
2025
2026 __event_process_build_id(&bev, filename, session);
2027
2028 offset += bev.header.size;
2029 }
2030 err = 0;
2031out:
2032 return err;
2033}
2034
/*
 * Macro for features that simply need to read and store a string.
 * Expands to a process_<feat>() handler that reads one string from the
 * feature section into the matching perf_env field, returning 0 on
 * success and -ENOMEM when the read/allocation fails.
 */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
	ff->ph->env.__feat_env = do_read_string(ff); \
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}

/* Trivial single-string feature processors. */
FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2049
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002050static int process_tracing_data(struct feat_fd *ff, void *data)
Robert Richterf1c67db2012-02-10 15:41:56 +01002051{
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002052 ssize_t ret = trace_report(ff->fd, data, false);
2053
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09002054 return ret < 0 ? -1 : 0;
Robert Richterf1c67db2012-02-10 15:41:56 +01002055}
2056
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002057static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
Robert Richterf1c67db2012-02-10 15:41:56 +01002058{
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002059 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
Robert Richterf1c67db2012-02-10 15:41:56 +01002060 pr_debug("Failed to read buildids, continuing...\n");
2061 return 0;
2062}
2063
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002064static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002065{
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002066 int ret;
2067 u32 nr_cpus_avail, nr_cpus_online;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002068
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002069 ret = do_read_u32(ff, &nr_cpus_avail);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002070 if (ret)
2071 return ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002072
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002073 ret = do_read_u32(ff, &nr_cpus_online);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002074 if (ret)
2075 return ret;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002076 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2077 ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002078 return 0;
2079}
2080
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002081static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002082{
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002083 u64 total_mem;
2084 int ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002085
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002086 ret = do_read_u64(ff, &total_mem);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002087 if (ret)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002088 return -1;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002089 ff->ph->env.total_mem = (unsigned long long)total_mem;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002090 return 0;
2091}
2092
Jiri Olsa32dcd022019-07-21 13:23:51 +02002093static struct evsel *
Jiri Olsa63503db2019-07-21 13:23:52 +02002094perf_evlist__find_by_index(struct evlist *evlist, int idx)
Robert Richter7c2f7af2012-08-16 21:10:23 +02002095{
Jiri Olsa32dcd022019-07-21 13:23:51 +02002096 struct evsel *evsel;
Robert Richter7c2f7af2012-08-16 21:10:23 +02002097
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002098 evlist__for_each_entry(evlist, evsel) {
Robert Richter7c2f7af2012-08-16 21:10:23 +02002099 if (evsel->idx == idx)
2100 return evsel;
2101 }
2102
2103 return NULL;
2104}
2105
2106static void
Jiri Olsa63503db2019-07-21 13:23:52 +02002107perf_evlist__set_event_name(struct evlist *evlist,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002108 struct evsel *event)
Robert Richter7c2f7af2012-08-16 21:10:23 +02002109{
Jiri Olsa32dcd022019-07-21 13:23:51 +02002110 struct evsel *evsel;
Robert Richter7c2f7af2012-08-16 21:10:23 +02002111
2112 if (!event->name)
2113 return;
2114
2115 evsel = perf_evlist__find_by_index(evlist, event->idx);
2116 if (!evsel)
2117 return;
2118
2119 if (evsel->name)
2120 return;
2121
2122 evsel->name = strdup(event->name);
2123}
2124
/*
 * Process the EVENT_DESC feature: read the event descriptors and use them
 * to name the evsels already present in the session's evlist.
 *
 * In pipe mode the descriptor array is stashed in ff->events instead of
 * being freed, because print_event_desc() cannot re-read it from a pipe;
 * ownership then passes to print_event_desc(), which frees it.
 */
static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	struct evsel *evsel, *events = read_event_desc(ff);

	if (!events)
		return 0;

	session = container_of(ff->ph, struct perf_session, header);

	if (session->data->is_pipe) {
		/* Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode. */
		ff->events = events;
	}

	/* array is terminated by an entry whose attr.size is zero */
	for (evsel = events; evsel->core.attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	if (!session->data->is_pipe)
		free_event_desc(events);

	return 0;
}
2150
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002151static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002152{
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002153 char *str, *cmdline = NULL, **argv = NULL;
2154 u32 nr, i, len = 0;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002155
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002156 if (do_read_u32(ff, &nr))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002157 return -1;
2158
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002159 ff->ph->env.nr_cmdline = nr;
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002160
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002161 cmdline = zalloc(ff->size + nr + 1);
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002162 if (!cmdline)
2163 return -1;
2164
2165 argv = zalloc(sizeof(char *) * (nr + 1));
2166 if (!argv)
2167 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002168
2169 for (i = 0; i < nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002170 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002171 if (!str)
2172 goto error;
2173
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002174 argv[i] = cmdline + len;
2175 memcpy(argv[i], str, strlen(str) + 1);
2176 len += strlen(str) + 1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002177 free(str);
2178 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002179 ff->ph->env.cmdline = cmdline;
2180 ff->ph->env.cmdline_argv = (const char **) argv;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002181 return 0;
2182
2183error:
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002184 free(argv);
2185 free(cmdline);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002186 return -1;
2187}
2188
/*
 * Process the CPU_TOPOLOGY feature.  The section layout has grown over
 * time, so parsing is staged and 'size' tracks how much has been
 * consumed to detect where an old header ends:
 *
 *   1. sibling core strings, then sibling thread strings (always present)
 *   2. per-cpu core_id/socket_id pairs (absent in old perf headers)
 *   3. sibling die strings and per-cpu die_id (absent before die support)
 *
 * Returns 0 on success (including early-out for old headers), -1 on
 * failure.  On failure ph->env.cpu is freed and zeroed.
 *
 * NOTE(review): two failure paths ("return -1" after the sibling-thread
 * and sibling-die counts) skip the free_cpu/strbuf cleanup used
 * elsewhere in this function — looks inconsistent; confirm intent.
 */
static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	u32 nr, i;
	char *str;
	struct strbuf sb;
	int cpu_nr = ff->ph->env.nr_cpus_avail;
	u64 size = 0;	/* bytes of the section consumed so far */
	struct perf_header *ph = ff->ph;
	bool do_core_id_test = true;

	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
	if (!ph->env.cpu)
		return -1;

	if (do_read_u32(ff, &nr))
		goto free_cpu;

	ph->env.nr_sibling_cores = nr;
	size += sizeof(u32);
	if (strbuf_init(&sb, 128) < 0)
		goto free_cpu;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	/* strbuf_detach hands the packed strings to env and resets sb */
	ph->env.sibling_cores = strbuf_detach(&sb, NULL);

	if (do_read_u32(ff, &nr))
		return -1;

	ph->env.nr_sibling_threads = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_threads = strbuf_detach(&sb, NULL);

	/*
	 * The header may be from old perf,
	 * which doesn't include core id and socket id information.
	 */
	if (ff->size <= size) {
		zfree(&ph->env.cpu);
		return 0;
	}

	/* On s390 the socket_id number is not related to the numbers of cpus.
	 * The socket_id number might be higher than the numbers of cpus.
	 * This depends on the configuration.
	 */
	if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
		do_core_id_test = false;

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].core_id = nr;
		size += sizeof(u32);

		if (do_read_u32(ff, &nr))
			goto free_cpu;

		/* (u32)-1 marks an offline/absent cpu, so it is allowed */
		if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
			pr_debug("socket_id number is too big."
				 "You may need to upgrade the perf tool.\n");
			goto free_cpu;
		}

		ph->env.cpu[i].socket_id = nr;
		size += sizeof(u32);
	}

	/*
	 * The header may be from old perf,
	 * which doesn't include die information.
	 */
	if (ff->size <= size)
		return 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ph->env.nr_sibling_dies = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_dies = strbuf_detach(&sb, NULL);

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].die_id = nr;
	}

	return 0;

error:
	strbuf_release(&sb);
free_cpu:
	zfree(&ph->env.cpu);
	return -1;
}
2320
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002321static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002322{
Jiri Olsac60da222016-07-04 14:16:20 +02002323 struct numa_node *nodes, *n;
Jiri Olsac60da222016-07-04 14:16:20 +02002324 u32 nr, i;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002325 char *str;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002326
2327 /* nr nodes */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002328 if (do_read_u32(ff, &nr))
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002329 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002330
Jiri Olsac60da222016-07-04 14:16:20 +02002331 nodes = zalloc(sizeof(*nodes) * nr);
2332 if (!nodes)
2333 return -ENOMEM;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002334
2335 for (i = 0; i < nr; i++) {
Jiri Olsac60da222016-07-04 14:16:20 +02002336 n = &nodes[i];
2337
Namhyung Kima1ae5652012-09-24 17:14:59 +09002338 /* node number */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002339 if (do_read_u32(ff, &n->node))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002340 goto error;
2341
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002342 if (do_read_u64(ff, &n->mem_total))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002343 goto error;
2344
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002345 if (do_read_u64(ff, &n->mem_free))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002346 goto error;
2347
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002348 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002349 if (!str)
2350 goto error;
2351
Jiri Olsa9c3516d2019-07-21 13:24:30 +02002352 n->map = perf_cpu_map__new(str);
Jiri Olsac60da222016-07-04 14:16:20 +02002353 if (!n->map)
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002354 goto error;
Jiri Olsac60da222016-07-04 14:16:20 +02002355
Namhyung Kima1ae5652012-09-24 17:14:59 +09002356 free(str);
2357 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002358 ff->ph->env.nr_numa_nodes = nr;
2359 ff->ph->env.numa_nodes = nodes;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002360 return 0;
2361
2362error:
Jiri Olsac60da222016-07-04 14:16:20 +02002363 free(nodes);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002364 return -1;
2365}
2366
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002367static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002368{
Namhyung Kima1ae5652012-09-24 17:14:59 +09002369 char *name;
2370 u32 pmu_num;
2371 u32 type;
2372 struct strbuf sb;
2373
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002374 if (do_read_u32(ff, &pmu_num))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002375 return -1;
2376
Namhyung Kima1ae5652012-09-24 17:14:59 +09002377 if (!pmu_num) {
2378 pr_debug("pmu mappings not available\n");
2379 return 0;
2380 }
2381
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002382 ff->ph->env.nr_pmu_mappings = pmu_num;
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002383 if (strbuf_init(&sb, 128) < 0)
2384 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002385
2386 while (pmu_num) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002387 if (do_read_u32(ff, &type))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002388 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002389
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002390 name = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002391 if (!name)
2392 goto error;
2393
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002394 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2395 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002396 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002397 if (strbuf_add(&sb, "", 1) < 0)
2398 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002399
Kan Liange0838e02015-09-10 11:03:05 -03002400 if (!strcmp(name, "msr"))
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002401 ff->ph->env.msr_pmu_type = type;
Kan Liange0838e02015-09-10 11:03:05 -03002402
Namhyung Kima1ae5652012-09-24 17:14:59 +09002403 free(name);
2404 pmu_num--;
2405 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002406 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002407 return 0;
2408
2409error:
2410 strbuf_release(&sb);
2411 return -1;
2412}
2413
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002414static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima8bb5592013-01-22 18:09:31 +09002415{
2416 size_t ret = -1;
2417 u32 i, nr, nr_groups;
2418 struct perf_session *session;
Jiri Olsa32dcd022019-07-21 13:23:51 +02002419 struct evsel *evsel, *leader = NULL;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002420 struct group_desc {
2421 char *name;
2422 u32 leader_idx;
2423 u32 nr_members;
2424 } *desc;
2425
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002426 if (do_read_u32(ff, &nr_groups))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002427 return -1;
2428
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002429 ff->ph->env.nr_groups = nr_groups;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002430 if (!nr_groups) {
2431 pr_debug("group desc not available\n");
2432 return 0;
2433 }
2434
2435 desc = calloc(nr_groups, sizeof(*desc));
2436 if (!desc)
2437 return -1;
2438
2439 for (i = 0; i < nr_groups; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002440 desc[i].name = do_read_string(ff);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002441 if (!desc[i].name)
2442 goto out_free;
2443
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002444 if (do_read_u32(ff, &desc[i].leader_idx))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002445 goto out_free;
2446
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002447 if (do_read_u32(ff, &desc[i].nr_members))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002448 goto out_free;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002449 }
2450
2451 /*
2452 * Rebuild group relationship based on the group_desc
2453 */
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002454 session = container_of(ff->ph, struct perf_session, header);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002455 session->evlist->nr_groups = nr_groups;
2456
2457 i = nr = 0;
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002458 evlist__for_each_entry(session->evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002459 if (evsel->idx == (int) desc[i].leader_idx) {
2460 evsel->leader = evsel;
2461 /* {anon_group} is a dummy name */
Namhyung Kim210e8122013-11-18 11:20:43 +09002462 if (strcmp(desc[i].name, "{anon_group}")) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002463 evsel->group_name = desc[i].name;
Namhyung Kim210e8122013-11-18 11:20:43 +09002464 desc[i].name = NULL;
2465 }
Namhyung Kima8bb5592013-01-22 18:09:31 +09002466 evsel->nr_members = desc[i].nr_members;
2467
2468 if (i >= nr_groups || nr > 0) {
2469 pr_debug("invalid group desc\n");
2470 goto out_free;
2471 }
2472
2473 leader = evsel;
2474 nr = evsel->nr_members - 1;
2475 i++;
2476 } else if (nr) {
2477 /* This is a group member */
2478 evsel->leader = leader;
2479
2480 nr--;
2481 }
2482 }
2483
2484 if (i != nr_groups || nr != 0) {
2485 pr_debug("invalid group desc\n");
2486 goto out_free;
2487 }
2488
2489 ret = 0;
2490out_free:
Namhyung Kim50a27402013-11-18 11:20:44 +09002491 for (i = 0; i < nr_groups; i++)
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -03002492 zfree(&desc[i].name);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002493 free(desc);
2494
2495 return ret;
2496}
2497
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002498static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
Adrian Hunter99fa2982015-04-30 17:37:25 +03002499{
2500 struct perf_session *session;
2501 int err;
2502
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002503 session = container_of(ff->ph, struct perf_session, header);
Adrian Hunter99fa2982015-04-30 17:37:25 +03002504
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002505 err = auxtrace_index__process(ff->fd, ff->size, session,
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002506 ff->ph->needs_swap);
Adrian Hunter99fa2982015-04-30 17:37:25 +03002507 if (err < 0)
2508 pr_err("Failed to process auxtrace index\n");
2509 return err;
2510}
2511
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002512static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
Jiri Olsa720e98b2016-02-16 16:01:43 +01002513{
2514 struct cpu_cache_level *caches;
2515 u32 cnt, i, version;
2516
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002517 if (do_read_u32(ff, &version))
Jiri Olsa720e98b2016-02-16 16:01:43 +01002518 return -1;
2519
Jiri Olsa720e98b2016-02-16 16:01:43 +01002520 if (version != 1)
2521 return -1;
2522
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002523 if (do_read_u32(ff, &cnt))
Jiri Olsa720e98b2016-02-16 16:01:43 +01002524 return -1;
2525
Jiri Olsa720e98b2016-02-16 16:01:43 +01002526 caches = zalloc(sizeof(*caches) * cnt);
2527 if (!caches)
2528 return -1;
2529
2530 for (i = 0; i < cnt; i++) {
2531 struct cpu_cache_level c;
2532
2533 #define _R(v) \
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002534 if (do_read_u32(ff, &c.v))\
Jiri Olsa720e98b2016-02-16 16:01:43 +01002535 goto out_free_caches; \
Jiri Olsa720e98b2016-02-16 16:01:43 +01002536
2537 _R(level)
2538 _R(line_size)
2539 _R(sets)
2540 _R(ways)
2541 #undef _R
2542
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002543 #define _R(v) \
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002544 c.v = do_read_string(ff); \
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002545 if (!c.v) \
Jiri Olsa720e98b2016-02-16 16:01:43 +01002546 goto out_free_caches;
2547
2548 _R(type)
2549 _R(size)
2550 _R(map)
2551 #undef _R
2552
2553 caches[i] = c;
2554 }
2555
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002556 ff->ph->env.caches = caches;
2557 ff->ph->env.caches_cnt = cnt;
Jiri Olsa720e98b2016-02-16 16:01:43 +01002558 return 0;
2559out_free_caches:
2560 free(caches);
2561 return -1;
2562}
2563
Jin Yao60115182017-12-08 21:13:41 +08002564static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2565{
2566 struct perf_session *session;
2567 u64 first_sample_time, last_sample_time;
2568 int ret;
2569
2570 session = container_of(ff->ph, struct perf_session, header);
2571
2572 ret = do_read_u64(ff, &first_sample_time);
2573 if (ret)
2574 return -1;
2575
2576 ret = do_read_u64(ff, &last_sample_time);
2577 if (ret)
2578 return -1;
2579
2580 session->evlist->first_sample_time = first_sample_time;
2581 session->evlist->last_sample_time = last_sample_time;
2582 return 0;
2583}
2584
Jiri Olsae2091ce2018-03-07 16:50:08 +01002585static int process_mem_topology(struct feat_fd *ff,
2586 void *data __maybe_unused)
2587{
2588 struct memory_node *nodes;
2589 u64 version, i, nr, bsize;
2590 int ret = -1;
2591
2592 if (do_read_u64(ff, &version))
2593 return -1;
2594
2595 if (version != 1)
2596 return -1;
2597
2598 if (do_read_u64(ff, &bsize))
2599 return -1;
2600
2601 if (do_read_u64(ff, &nr))
2602 return -1;
2603
2604 nodes = zalloc(sizeof(*nodes) * nr);
2605 if (!nodes)
2606 return -1;
2607
2608 for (i = 0; i < nr; i++) {
2609 struct memory_node n;
2610
2611 #define _R(v) \
2612 if (do_read_u64(ff, &n.v)) \
2613 goto out; \
2614
2615 _R(node)
2616 _R(size)
2617
2618 #undef _R
2619
2620 if (do_read_bitmap(ff, &n.set, &n.size))
2621 goto out;
2622
2623 nodes[i] = n;
2624 }
2625
2626 ff->ph->env.memory_bsize = bsize;
2627 ff->ph->env.memory_nodes = nodes;
2628 ff->ph->env.nr_memory_nodes = nr;
2629 ret = 0;
2630
2631out:
2632 if (ret)
2633 free(nodes);
2634 return ret;
2635}
2636
Alexey Budankovcf790512018-10-09 17:36:24 +03002637static int process_clockid(struct feat_fd *ff,
2638 void *data __maybe_unused)
2639{
2640 if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
2641 return -1;
2642
2643 return 0;
2644}
2645
Jiri Olsa258031c2019-03-08 14:47:39 +01002646static int process_dir_format(struct feat_fd *ff,
2647 void *_data __maybe_unused)
2648{
2649 struct perf_session *session;
2650 struct perf_data *data;
2651
2652 session = container_of(ff->ph, struct perf_session, header);
2653 data = session->data;
2654
2655 if (WARN_ON(!perf_data__is_dir(data)))
2656 return -1;
2657
2658 return do_read_u64(ff, &data->dir.version);
2659}
2660
#ifdef HAVE_LIBBPF_SUPPORT
/*
 * Read the HEADER_BPF_PROG_INFO feature: a count followed by one
 * bpf_prog_info_linear blob per program.  Each blob is wrapped in a
 * bpf_prog_info_node and inserted into the perf_env, all under
 * env->bpf_progs.lock.
 *
 * Returns 0 on success (or when the data cannot be interpreted, e.g.
 * cross-endian files), -1 on malformed input or allocation failure.
 */
static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = &ff->ph->env;
	u32 count, i;
	int err = -1;

	/* cross-endian decode is not implemented: skip, don't fail */
	if (ff->ph->needs_swap) {
		pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
		return 0;
	}

	if (do_read_u32(ff, &count))
		return -1;

	down_write(&env->bpf_progs.lock);

	for (i = 0; i < count; ++i) {
		u32 info_len, data_len;

		/* reset so the error path only frees this iteration's work */
		info_linear = NULL;
		info_node = NULL;
		if (do_read_u32(ff, &info_len))
			goto out;
		if (do_read_u32(ff, &data_len))
			goto out;

		/* info_len larger than we know about means corrupt data */
		if (info_len > sizeof(struct bpf_prog_info)) {
			pr_warning("detected invalid bpf_prog_info\n");
			goto out;
		}

		/* the variable-size data area follows the fixed header */
		info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
				     data_len);
		if (!info_linear)
			goto out;
		info_linear->info_len = sizeof(struct bpf_prog_info);
		info_linear->data_len = data_len;
		if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
			goto out;
		if (__do_read(ff, &info_linear->info, info_len))
			goto out;
		/* zero-fill fields an older/shorter file did not record */
		if (info_len < sizeof(struct bpf_prog_info))
			memset(((void *)(&info_linear->info)) + info_len, 0,
			       sizeof(struct bpf_prog_info) - info_len);

		if (__do_read(ff, info_linear->data, data_len))
			goto out;

		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node)
			goto out;

		/* after reading from file, translate offset to address */
		bpf_program__bpil_offs_to_addr(info_linear);
		info_node->info_linear = info_linear;
		/* ownership of info_node (and info_linear) moves to env */
		perf_env__insert_bpf_prog_info(env, info_node);
	}

	up_write(&env->bpf_progs.lock);
	return 0;
out:
	free(info_linear);
	free(info_node);
	up_write(&env->bpf_progs.lock);
	return err;
}
#else // HAVE_LIBBPF_SUPPORT
/* without libbpf the blobs cannot be decoded; silently accept them */
static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
2736
/*
 * Read the HEADER_BPF_BTF feature: a count followed by {id, size, data}
 * BTF blobs, each wrapped in a btf_node and inserted into the perf_env
 * under env->bpf_progs.lock.
 *
 * Returns 0 on success (or for cross-endian files, which cannot be
 * interpreted yet), -1 on malformed input or allocation failure.
 */
static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct btf_node *node = NULL;
	u32 count, i;
	int err = -1;

	/* cross-endian decode is not implemented: skip, don't fail */
	if (ff->ph->needs_swap) {
		pr_warning("interpreting btf from systems with endianity is not yet supported\n");
		return 0;
	}

	if (do_read_u32(ff, &count))
		return -1;

	down_write(&env->bpf_progs.lock);

	for (i = 0; i < count; ++i) {
		u32 id, data_size;

		if (do_read_u32(ff, &id))
			goto out;
		if (do_read_u32(ff, &data_size))
			goto out;

		/* the payload lives in a trailing variable-size area */
		node = malloc(sizeof(struct btf_node) + data_size);
		if (!node)
			goto out;

		node->id = id;
		node->data_size = data_size;

		if (__do_read(ff, node->data, data_size))
			goto out;

		perf_env__insert_btf(env, node);
		/* ownership moved to env: NULL so the exit path's
		 * free(node) can't double-free it */
		node = NULL;
	}

	err = 0;
out:
	up_write(&env->bpf_progs.lock);
	free(node);
	return err;
}
2782
Alexey Budankov42e1fd82019-03-18 20:41:33 +03002783static int process_compressed(struct feat_fd *ff,
2784 void *data __maybe_unused)
2785{
2786 if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
2787 return -1;
2788
2789 if (do_read_u32(ff, &(ff->ph->env.comp_type)))
2790 return -1;
2791
2792 if (do_read_u32(ff, &(ff->ph->env.comp_level)))
2793 return -1;
2794
2795 if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
2796 return -1;
2797
2798 if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
2799 return -1;
2800
2801 return 0;
2802}
2803
/*
 * Per-feature handler set, one entry per HEADER_* feature bit: write
 * emits the section at record time, print pretty-prints it for header
 * display, process parses it at read time.
 */
struct feature_ops {
	int (*write)(struct feat_fd *ff, struct evlist *evlist);
	void (*print)(struct feat_fd *ff, FILE *fp);
	int (*process)(struct feat_fd *ff, void *data);
	const char *name;
	bool full_only;		/* only shown in the full (-I) header listing */
	bool synthesize;	/* feature can be synthesized (pipe mode) */
};

/* feature with all callbacks and synthesize support */
#define FEAT_OPR(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func,			\
		.synthesize = true				\
	}

/* feature without synthesize support */
#define FEAT_OPN(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func			\
	}

/* feature_ops not implemented: */
#define print_tracing_data NULL
#define print_build_id NULL

#define process_branch_stack NULL
#define process_stat NULL
2838
2839
/*
 * Handler table indexed by HEADER_* feature bit.  FEAT_OPR entries are
 * marked synthesizable, FEAT_OPN entries are not (see the macros above
 * their definitions).
 */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPR(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
	FEAT_OPR(CLOCKID,	clockid,	false),
	FEAT_OPN(DIR_FORMAT,	dir_format,	false),
	FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,	false),
	FEAT_OPR(BPF_BTF,	bpf_btf,	false),
	FEAT_OPR(COMPRESSED,	compressed,	false),
};
2869
/* closure handed to perf_header__process_sections() when printing */
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};
2874
2875static int perf_file_section__fprintf_info(struct perf_file_section *section,
2876 struct perf_header *ph,
2877 int feat, int fd, void *data)
2878{
2879 struct header_print_data *hd = data;
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07002880 struct feat_fd ff;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002881
2882 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2883 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2884 "%d, continuing...\n", section->offset, feat);
2885 return 0;
2886 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002887 if (feat >= HEADER_LAST_FEATURE) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002888 pr_warning("unknown feature %d\n", feat);
Robert Richterf7a8a132011-12-07 10:02:51 +01002889 return 0;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002890 }
2891 if (!feat_ops[feat].print)
2892 return 0;
2893
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07002894 ff = (struct feat_fd) {
2895 .fd = fd,
2896 .ph = ph,
2897 };
2898
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002899 if (!feat_ops[feat].full_only || hd->full)
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07002900 feat_ops[feat].print(&ff, hd->fp);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002901 else
2902 fprintf(hd->fp, "# %s info available, use -I to display\n",
2903 feat_ops[feat].name);
2904
2905 return 0;
2906}
2907
/*
 * Print a human readable summary of the perf.data header to @fp.  With
 * @full false, full_only features are only mentioned, not expanded.
 * Returns 0 on success, -1 if the data file cannot be fstat()ed.
 */
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data__fd(session->data);
	struct stat st;
	time_t stctime;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	/* st_ctime need not be a plain time_t; copy before ctime() */
	stctime = st.st_ctime;
	fprintf(fp, "# captured on : %s", ctime(&stctime));

	fprintf(fp, "# header version : %u\n", header->version);
	fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
	fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
	fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	/* a pipe carries no feature bitmap worth reporting on */
	if (session->data->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		/* bit 0 is never reported (presumably reserved) */
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}
2947
/*
 * Write one feature section, if @type is set in the header's feature
 * bitmap.  On success, record offset/size in **p and advance *p; on a
 * failed write, rewind the file to undo any partial output.
 * Returns 0 on success (or feature not set), -1 on error.
 */
static int do_write_feat(struct feat_fd *ff, int type,
			 struct perf_file_section **p,
			 struct evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(ff->ph, type)) {
		if (!feat_ops[type].write)
			return -1;

		/* feature sections are file-only, never pipe buffers */
		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
			return -1;

		/* remember where this section's payload starts */
		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);

		err = feat_ops[type].write(ff, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %s\n", feat_ops[type].name);

			/* undo anything written */
			lseek(ff->fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}
2978
/*
 * Write all enabled feature sections: first each section's payload
 * (after the reserved descriptor table at header->feat_offset), then
 * the table of perf_file_section descriptors itself.  A feature whose
 * write fails is cleared from the bitmap rather than aborting.
 * Returns 0 on success, negative on error.
 */
static int perf_header__adds_write(struct perf_header *header,
				   struct evlist *evlist, int fd)
{
	int nr_sections;
	struct feat_fd ff;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	ff = (struct feat_fd){
		.fd  = fd,
		.ph = header,
	};

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	/* payloads go right after the (yet unwritten) descriptor table */
	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(&ff, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	/* now go back and fill in the descriptor table */
	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(&ff, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003024
Tom Zanussi8dc58102010-04-01 23:59:15 -05003025int perf_header__write_pipe(int fd)
3026{
3027 struct perf_pipe_file_header f_header;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003028 struct feat_fd ff;
Tom Zanussi8dc58102010-04-01 23:59:15 -05003029 int err;
3030
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003031 ff = (struct feat_fd){ .fd = fd };
3032
Tom Zanussi8dc58102010-04-01 23:59:15 -05003033 f_header = (struct perf_pipe_file_header){
3034 .magic = PERF_MAGIC,
3035 .size = sizeof(f_header),
3036 };
3037
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003038 err = do_write(&ff, &f_header, sizeof(f_header));
Tom Zanussi8dc58102010-04-01 23:59:15 -05003039 if (err < 0) {
3040 pr_debug("failed to write perf pipe header\n");
3041 return err;
3042 }
3043
3044 return 0;
3045}
3046
/*
 * Write the perf.data file header: each evsel's sample id array, the
 * perf_file_attr section, optionally (@at_exit) the feature sections,
 * and finally the struct perf_file_header itself back at offset 0.
 * Returns 0 on success, negative on write error.
 */
int perf_session__write_header(struct perf_session *session,
			       struct evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	struct perf_header *header = &session->header;
	struct evsel *evsel;
	struct feat_fd ff;
	u64 attr_offset;
	int err;

	ff = (struct feat_fd){ .fd = fd};
	/* leave room for the file header, which is written last */
	lseek(fd, sizeof(f_header), SEEK_SET);

	/* write each evsel's id array, remembering where it landed */
	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(ff.fd, 0, SEEK_CUR);

	/* one perf_file_attr per evsel, pointing back at its id array */
	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->core.attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	/* data_offset may already have been fixed by the caller */
	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	/* feature sections are only known once recording is finished */
	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->core.nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	/* the file header goes at the very start of the file */
	lseek(fd, 0, SEEK_SET);
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	/* leave the file position at the end of the data section */
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
3125
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003126static int perf_header__getbuffer64(struct perf_header *header,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003127 int fd, void *buf, size_t size)
3128{
Arnaldo Carvalho de Melo1e7972c2011-01-03 16:50:55 -02003129 if (readn(fd, buf, size) <= 0)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003130 return -1;
3131
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003132 if (header->needs_swap)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003133 mem_bswap_64(buf, size);
3134
3135 return 0;
3136}
3137
/*
 * Read the feature descriptor table from header->feat_offset and call
 * @process once per feature bit set in the header, handing it the
 * matching perf_file_section.  Stops at the first callback error.
 * Returns 0 on success, negative on error.
 */
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	/* descriptors are u64 pairs; swapped here if needed */
	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02003176
/*
 * On-file perf_event_attr sizes for each known file-format ABI revision,
 * indexed by revision number.  The table is zero-terminated so callers can
 * iterate until they hit the sentinel.
 */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};
3185
3186/*
3187 * In the legacy file format, the magic number is not used to encode endianness.
3188 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
3189 * on ABI revisions, we need to try all combinations for all endianness to
3190 * detect the endianness.
3191 */
3192static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
3193{
3194 uint64_t ref_size, attr_size;
3195 int i;
3196
3197 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
3198 ref_size = attr_file_abi_sizes[i]
3199 + sizeof(struct perf_file_section);
3200 if (hdr_sz != ref_size) {
3201 attr_size = bswap_64(hdr_sz);
3202 if (attr_size != ref_size)
3203 continue;
3204
3205 ph->needs_swap = true;
3206 }
3207 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
3208 i,
3209 ph->needs_swap);
3210 return 0;
3211 }
3212 /* could not determine endianness */
3213 return -1;
3214}
3215
/* Size of the pipe-mode file header in the original (version 0) pipe ABI. */
#define PERF_PIPE_HDR_VER0	16

/* Known pipe-mode header sizes per ABI revision; zero-terminated. */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
3222
3223/*
3224 * In the legacy pipe format, there is an implicit assumption that endiannesss
3225 * between host recording the samples, and host parsing the samples is the
3226 * same. This is not always the case given that the pipe output may always be
3227 * redirected into a file and analyzed on a different machine with possibly a
3228 * different endianness and perf_event ABI revsions in the perf tool itself.
3229 */
3230static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
3231{
3232 u64 attr_size;
3233 int i;
3234
3235 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
3236 if (hdr_sz != attr_pipe_abi_sizes[i]) {
3237 attr_size = bswap_64(hdr_sz);
3238 if (attr_size != hdr_sz)
3239 continue;
3240
3241 ph->needs_swap = true;
3242 }
3243 pr_debug("Pipe ABI%d perf.data file detected\n", i);
3244 return 0;
3245 }
3246 return -1;
3247}
3248
Feng Tange84ba4e2012-10-30 11:56:07 +08003249bool is_perf_magic(u64 magic)
3250{
3251 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3252 || magic == __perf_magic2
3253 || magic == __perf_magic2_sw)
3254 return true;
3255
3256 return false;
3257}
3258
Stephane Eranian114382a2012-02-09 23:21:08 +01003259static int check_magic_endian(u64 magic, uint64_t hdr_sz,
3260 bool is_pipe, struct perf_header *ph)
Stephane Eranian73323f52012-02-02 13:54:44 +01003261{
3262 int ret;
3263
3264 /* check for legacy format */
Stephane Eranian114382a2012-02-09 23:21:08 +01003265 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
Stephane Eranian73323f52012-02-02 13:54:44 +01003266 if (ret == 0) {
Jiri Olsa2a08c3e2013-07-17 19:49:47 +02003267 ph->version = PERF_HEADER_VERSION_1;
Stephane Eranian73323f52012-02-02 13:54:44 +01003268 pr_debug("legacy perf.data format\n");
Stephane Eranian114382a2012-02-09 23:21:08 +01003269 if (is_pipe)
3270 return try_all_pipe_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01003271
Stephane Eranian114382a2012-02-09 23:21:08 +01003272 return try_all_file_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01003273 }
Stephane Eranian114382a2012-02-09 23:21:08 +01003274 /*
3275 * the new magic number serves two purposes:
3276 * - unique number to identify actual perf.data files
3277 * - encode endianness of file
3278 */
Namhyung Kimf7913972015-01-29 17:06:45 +09003279 ph->version = PERF_HEADER_VERSION_2;
Stephane Eranian73323f52012-02-02 13:54:44 +01003280
Stephane Eranian114382a2012-02-09 23:21:08 +01003281 /* check magic number with one endianness */
3282 if (magic == __perf_magic2)
Stephane Eranian73323f52012-02-02 13:54:44 +01003283 return 0;
3284
Stephane Eranian114382a2012-02-09 23:21:08 +01003285 /* check magic number with opposite endianness */
3286 if (magic != __perf_magic2_sw)
Stephane Eranian73323f52012-02-02 13:54:44 +01003287 return -1;
3288
3289 ph->needs_swap = true;
3290
3291 return 0;
3292}
3293
/*
 * Read and validate the on-disk perf.data file header into @header and
 * fill in the in-memory header state @ph (version, endianness, feature
 * bitmap, data/feature offsets).  Returns 0 on success, -1 on failure.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	/* sets ph->version and ph->needs_swap from the magic / legacy sizes */
	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		/* swap all u64 fields up to (not including) the feature bitmap */
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		/* still no hostname bit: old data file, assume just buildids */
		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	/* the feature sections live right after the data section */
	ph->data_offset = header->data.offset;
	ph->data_size = header->data.size;
	ph->feat_offset = header->data.offset + header->data.size;
	return 0;
}
3365
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003366static int perf_file_section__process(struct perf_file_section *section,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003367 struct perf_header *ph,
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03003368 int feat, int fd, void *data)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003369{
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003370 struct feat_fd fdd = {
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07003371 .fd = fd,
3372 .ph = ph,
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003373 .size = section->size,
3374 .offset = section->offset,
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07003375 };
3376
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003377 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
Arnaldo Carvalho de Melo9486aa32011-01-22 20:37:02 -02003378 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003379 "%d, continuing...\n", section->offset, feat);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003380 return 0;
3381 }
3382
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003383 if (feat >= HEADER_LAST_FEATURE) {
3384 pr_debug("unknown feature %d, continuing...\n", feat);
3385 return 0;
3386 }
3387
Robert Richterf1c67db2012-02-10 15:41:56 +01003388 if (!feat_ops[feat].process)
3389 return 0;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003390
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003391 return feat_ops[feat].process(&fdd, data);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003392}
3393
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003394static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
Tom Zanussi454c4072010-05-01 01:41:20 -05003395 struct perf_header *ph, int fd,
3396 bool repipe)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003397{
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003398 struct feat_fd ff = {
3399 .fd = STDOUT_FILENO,
3400 .ph = ph,
3401 };
Jiri Olsa727ebd52013-11-28 11:30:14 +01003402 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01003403
3404 ret = readn(fd, header, sizeof(*header));
3405 if (ret <= 0)
3406 return -1;
3407
Stephane Eranian114382a2012-02-09 23:21:08 +01003408 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
3409 pr_debug("endian/magic failed\n");
Tom Zanussi8dc58102010-04-01 23:59:15 -05003410 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01003411 }
3412
3413 if (ph->needs_swap)
3414 header->size = bswap_64(header->size);
Tom Zanussi8dc58102010-04-01 23:59:15 -05003415
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003416 if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
Tom Zanussi454c4072010-05-01 01:41:20 -05003417 return -1;
3418
Tom Zanussi8dc58102010-04-01 23:59:15 -05003419 return 0;
3420}
3421
Jiri Olsad4339562013-07-17 19:49:41 +02003422static int perf_header__read_pipe(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05003423{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003424 struct perf_header *header = &session->header;
Tom Zanussi8dc58102010-04-01 23:59:15 -05003425 struct perf_pipe_file_header f_header;
3426
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003427 if (perf_file_header__read_pipe(&f_header, header,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003428 perf_data__fd(session->data),
Tom Zanussi454c4072010-05-01 01:41:20 -05003429 session->repipe) < 0) {
Tom Zanussi8dc58102010-04-01 23:59:15 -05003430 pr_debug("incompatible file format\n");
3431 return -EINVAL;
3432 }
3433
Tom Zanussi8dc58102010-04-01 23:59:15 -05003434 return 0;
3435}
3436
Stephane Eranian69996df2012-02-09 23:21:06 +01003437static int read_attr(int fd, struct perf_header *ph,
3438 struct perf_file_attr *f_attr)
3439{
3440 struct perf_event_attr *attr = &f_attr->attr;
3441 size_t sz, left;
3442 size_t our_sz = sizeof(f_attr->attr);
Jiri Olsa727ebd52013-11-28 11:30:14 +01003443 ssize_t ret;
Stephane Eranian69996df2012-02-09 23:21:06 +01003444
3445 memset(f_attr, 0, sizeof(*f_attr));
3446
3447 /* read minimal guaranteed structure */
3448 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
3449 if (ret <= 0) {
3450 pr_debug("cannot read %d bytes of header attr\n",
3451 PERF_ATTR_SIZE_VER0);
3452 return -1;
3453 }
3454
3455 /* on file perf_event_attr size */
3456 sz = attr->size;
Stephane Eranian114382a2012-02-09 23:21:08 +01003457
Stephane Eranian69996df2012-02-09 23:21:06 +01003458 if (ph->needs_swap)
3459 sz = bswap_32(sz);
3460
3461 if (sz == 0) {
3462 /* assume ABI0 */
3463 sz = PERF_ATTR_SIZE_VER0;
3464 } else if (sz > our_sz) {
3465 pr_debug("file uses a more recent and unsupported ABI"
3466 " (%zu bytes extra)\n", sz - our_sz);
3467 return -1;
3468 }
3469 /* what we have not yet read and that we know about */
3470 left = sz - PERF_ATTR_SIZE_VER0;
3471 if (left) {
3472 void *ptr = attr;
3473 ptr += PERF_ATTR_SIZE_VER0;
3474
3475 ret = readn(fd, ptr, left);
3476 }
3477 /* read perf_file_section, ids are read in caller */
3478 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
3479
3480 return ret <= 0 ? -1 : 0;
3481}
3482
Jiri Olsa32dcd022019-07-21 13:23:51 +02003483static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel,
Tzvetomir Stoyanov (VMware)096177a2018-08-08 14:02:46 -04003484 struct tep_handle *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003485{
Tzvetomir Stoyanov97fbf3f2018-11-30 10:44:07 -05003486 struct tep_event *event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003487 char bf[128];
3488
Namhyung Kim831394b2012-09-06 11:10:46 +09003489 /* already prepared */
3490 if (evsel->tp_format)
3491 return 0;
3492
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09003493 if (pevent == NULL) {
3494 pr_debug("broken or missing trace data\n");
3495 return -1;
3496 }
3497
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003498 event = tep_find_event(pevent, evsel->core.attr.config);
Namhyung Kima7619ae2013-04-18 21:24:16 +09003499 if (event == NULL) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003500 pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003501 return -1;
Namhyung Kima7619ae2013-04-18 21:24:16 +09003502 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003503
Namhyung Kim831394b2012-09-06 11:10:46 +09003504 if (!evsel->name) {
3505 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
3506 evsel->name = strdup(bf);
3507 if (evsel->name == NULL)
3508 return -1;
3509 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003510
Arnaldo Carvalho de Melofcf65bf2012-08-07 09:58:03 -03003511 evsel->tp_format = event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003512 return 0;
3513}
3514
Jiri Olsa63503db2019-07-21 13:23:52 +02003515static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist,
Tzvetomir Stoyanov (VMware)096177a2018-08-08 14:02:46 -04003516 struct tep_handle *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003517{
Jiri Olsa32dcd022019-07-21 13:23:51 +02003518 struct evsel *pos;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003519
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03003520 evlist__for_each_entry(evlist, pos) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003521 if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
Namhyung Kim831394b2012-09-06 11:10:46 +09003522 perf_evsel__prepare_tracepoint_event(pos, pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003523 return -1;
3524 }
3525
3526 return 0;
3527}
3528
/*
 * Parse the perf.data header for @session: create the evlist, read the
 * file header (or hand off to the pipe-mode reader), reconstruct one
 * evsel per on-file attr together with its sample ids, then process the
 * trailing feature sections and bind tracepoint formats.
 * Returns 0, or a negative errno-style value on failure.
 */
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	/* pipe-mode input has its own, much smaller header */
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information.  Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	/* attr_size of 0 would divide by zero below; reject such files */
	if (f_header.attr_size == 0) {
		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
		       "Was the 'perf record' command properly terminated?\n",
		       data->file.path);
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		/* remember where the next attr starts; ids live elsewhere */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at evlist__delete().
		 */
		evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		/* back to the next attr */
		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02003636
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003637int perf_event__synthesize_attr(struct perf_tool *tool,
Robert Richterf4d83432012-08-16 21:10:17 +02003638 struct perf_event_attr *attr, u32 ids, u64 *id,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02003639 perf_event__handler_t process)
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02003640{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003641 union perf_event *ev;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003642 size_t size;
3643 int err;
3644
3645 size = sizeof(struct perf_event_attr);
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003646 size = PERF_ALIGN(size, sizeof(u64));
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003647 size += sizeof(struct perf_event_header);
3648 size += ids * sizeof(u64);
3649
Numfor Mbiziwo-Tiapo20f97812019-07-24 16:44:58 -07003650 ev = zalloc(size);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003651
Chris Samuelce47dc52010-11-13 13:35:06 +11003652 if (ev == NULL)
3653 return -ENOMEM;
3654
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003655 ev->attr.attr = *attr;
3656 memcpy(ev->attr.id, id, ids * sizeof(u64));
3657
3658 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
Robert Richterf4d83432012-08-16 21:10:17 +02003659 ev->attr.header.size = (u16)size;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003660
Robert Richterf4d83432012-08-16 21:10:17 +02003661 if (ev->attr.header.size == size)
3662 err = process(tool, ev, NULL, NULL);
3663 else
3664 err = -E2BIG;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003665
3666 free(ev);
3667
3668 return err;
3669}
3670
/*
 * For every feature bit set in the session header, run the feature's
 * ->write() callback into a PERF_RECORD_HEADER_FEATURE record and hand it
 * to @process; finish with a HEADER_LAST_FEATURE marker record.
 * Returns 0 on success or the first non-zero value from @process.
 */
int perf_event__synthesize_features(struct perf_tool *tool,
				    struct perf_session *session,
				    struct evlist *evlist,
				    perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct feat_fd ff;
	struct feature_event *fe;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;
	ff.ph = &session->header;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header :%d\n", feat);
			continue;
		}

		/* leave room for the feature_event header before the payload */
		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id = HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}
3735
Jiri Olsa89f16882018-09-13 14:54:03 +02003736int perf_event__process_feature(struct perf_session *session,
3737 union perf_event *event)
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003738{
Jiri Olsa89f16882018-09-13 14:54:03 +02003739 struct perf_tool *tool = session->tool;
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003740 struct feat_fd ff = { .fd = 0 };
3741 struct feature_event *fe = (struct feature_event *)event;
3742 int type = fe->header.type;
3743 u64 feat = fe->feat_id;
3744
3745 if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
3746 pr_warning("invalid record type %d in pipe-mode\n", type);
3747 return 0;
3748 }
Ravi Bangoria92ead7e2018-06-25 18:12:20 +05303749 if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003750 pr_warning("invalid record type %d in pipe-mode\n", type);
3751 return -1;
3752 }
3753
3754 if (!feat_ops[feat].process)
3755 return 0;
3756
3757 ff.buf = (void *)fe->data;
Jiri Olsa79b2fe52019-07-15 16:04:26 +02003758 ff.size = event->header.size - sizeof(*fe);
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003759 ff.ph = &session->header;
3760
3761 if (feat_ops[feat].process(&ff, NULL))
3762 return -1;
3763
3764 if (!feat_ops[feat].print || !tool->show_feat_hdr)
3765 return 0;
3766
3767 if (!feat_ops[feat].full_only ||
3768 tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
3769 feat_ops[feat].print(&ff, stdout);
3770 } else {
3771 fprintf(stdout, "# %s info available, use -I to display\n",
3772 feat_ops[feat].name);
3773 }
3774
3775 return 0;
3776}
3777
Jiri Olsaa6e52812015-10-25 15:51:37 +01003778static struct event_update_event *
3779event_update_event__new(size_t size, u64 type, u64 id)
3780{
3781 struct event_update_event *ev;
3782
3783 size += sizeof(*ev);
3784 size = PERF_ALIGN(size, sizeof(u64));
3785
3786 ev = zalloc(size);
3787 if (ev) {
3788 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3789 ev->header.size = (u16)size;
3790 ev->type = type;
3791 ev->id = id;
3792 }
3793 return ev;
3794}
3795
3796int
3797perf_event__synthesize_event_update_unit(struct perf_tool *tool,
Jiri Olsa32dcd022019-07-21 13:23:51 +02003798 struct evsel *evsel,
Jiri Olsaa6e52812015-10-25 15:51:37 +01003799 perf_event__handler_t process)
3800{
3801 struct event_update_event *ev;
3802 size_t size = strlen(evsel->unit);
3803 int err;
3804
3805 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3806 if (ev == NULL)
3807 return -ENOMEM;
3808
Arnaldo Carvalho de Melo75725882018-12-06 11:02:57 -03003809 strlcpy(ev->data, evsel->unit, size + 1);
Jiri Olsaa6e52812015-10-25 15:51:37 +01003810 err = process(tool, (union perf_event *)ev, NULL, NULL);
3811 free(ev);
3812 return err;
3813}
3814
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003815int
3816perf_event__synthesize_event_update_scale(struct perf_tool *tool,
Jiri Olsa32dcd022019-07-21 13:23:51 +02003817 struct evsel *evsel,
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003818 perf_event__handler_t process)
3819{
3820 struct event_update_event *ev;
3821 struct event_update_event_scale *ev_data;
3822 int err;
3823
3824 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3825 if (ev == NULL)
3826 return -ENOMEM;
3827
3828 ev_data = (struct event_update_event_scale *) ev->data;
3829 ev_data->scale = evsel->scale;
3830 err = process(tool, (union perf_event*) ev, NULL, NULL);
3831 free(ev);
3832 return err;
3833}
3834
Jiri Olsa802c9042015-10-25 15:51:39 +01003835int
3836perf_event__synthesize_event_update_name(struct perf_tool *tool,
Jiri Olsa32dcd022019-07-21 13:23:51 +02003837 struct evsel *evsel,
Jiri Olsa802c9042015-10-25 15:51:39 +01003838 perf_event__handler_t process)
3839{
3840 struct event_update_event *ev;
3841 size_t len = strlen(evsel->name);
3842 int err;
3843
3844 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3845 if (ev == NULL)
3846 return -ENOMEM;
3847
Arnaldo Carvalho de Melo5192bde2018-12-06 11:09:46 -03003848 strlcpy(ev->data, evsel->name, len + 1);
Jiri Olsa802c9042015-10-25 15:51:39 +01003849 err = process(tool, (union perf_event*) ev, NULL, NULL);
3850 free(ev);
3851 return err;
3852}
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003853
Jiri Olsa86ebb092015-10-25 15:51:40 +01003854int
3855perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
Jiri Olsa32dcd022019-07-21 13:23:51 +02003856 struct evsel *evsel,
Jiri Olsa86ebb092015-10-25 15:51:40 +01003857 perf_event__handler_t process)
3858{
3859 size_t size = sizeof(struct event_update_event);
3860 struct event_update_event *ev;
3861 int max, err;
3862 u16 type;
3863
3864 if (!evsel->own_cpus)
3865 return 0;
3866
3867 ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3868 if (!ev)
3869 return -ENOMEM;
3870
3871 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3872 ev->header.size = (u16)size;
3873 ev->type = PERF_EVENT_UPDATE__CPUS;
3874 ev->id = evsel->id[0];
3875
3876 cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3877 evsel->own_cpus,
3878 type, max);
3879
3880 err = process(tool, (union perf_event*) ev, NULL, NULL);
3881 free(ev);
3882 return err;
3883}
3884
Jiri Olsac853f932015-10-25 15:51:41 +01003885size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3886{
3887 struct event_update_event *ev = &event->event_update;
3888 struct event_update_event_scale *ev_scale;
3889 struct event_update_event_cpus *ev_cpus;
Jiri Olsaf8548392019-07-21 13:23:49 +02003890 struct perf_cpu_map *map;
Jiri Olsac853f932015-10-25 15:51:41 +01003891 size_t ret;
3892
3893 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);
3894
3895 switch (ev->type) {
3896 case PERF_EVENT_UPDATE__SCALE:
3897 ev_scale = (struct event_update_event_scale *) ev->data;
3898 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3899 break;
3900 case PERF_EVENT_UPDATE__UNIT:
3901 ret += fprintf(fp, "... unit: %s\n", ev->data);
3902 break;
3903 case PERF_EVENT_UPDATE__NAME:
3904 ret += fprintf(fp, "... name: %s\n", ev->data);
3905 break;
3906 case PERF_EVENT_UPDATE__CPUS:
3907 ev_cpus = (struct event_update_event_cpus *) ev->data;
3908 ret += fprintf(fp, "... ");
3909
3910 map = cpu_map__new_data(&ev_cpus->cpus);
3911 if (map)
3912 ret += cpu_map__fprintf(map, fp);
3913 else
3914 ret += fprintf(fp, "failed to get cpus\n");
3915 break;
3916 default:
3917 ret += fprintf(fp, "... unknown type\n");
3918 break;
3919 }
3920
3921 return ret;
3922}
Jiri Olsa86ebb092015-10-25 15:51:40 +01003923
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003924int perf_event__synthesize_attrs(struct perf_tool *tool,
Jiri Olsa63503db2019-07-21 13:23:52 +02003925 struct evlist *evlist,
Jiri Olsa318ec182018-08-30 08:32:15 +02003926 perf_event__handler_t process)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003927{
Jiri Olsa32dcd022019-07-21 13:23:51 +02003928 struct evsel *evsel;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003929 int err = 0;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003930
Jiri Olsa318ec182018-08-30 08:32:15 +02003931 evlist__for_each_entry(evlist, evsel) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02003932 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->ids,
Robert Richter6606f872012-08-16 21:10:19 +02003933 evsel->id, process);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003934 if (err) {
3935 pr_debug("failed to create perf header attribute\n");
3936 return err;
3937 }
3938 }
3939
3940 return err;
3941}
3942
Jiri Olsa32dcd022019-07-21 13:23:51 +02003943static bool has_unit(struct evsel *counter)
Andi Kleenbfd8f722017-11-17 13:42:58 -08003944{
3945 return counter->unit && *counter->unit;
3946}
3947
Jiri Olsa32dcd022019-07-21 13:23:51 +02003948static bool has_scale(struct evsel *counter)
Andi Kleenbfd8f722017-11-17 13:42:58 -08003949{
3950 return counter->scale != 1;
3951}
3952
3953int perf_event__synthesize_extra_attr(struct perf_tool *tool,
Jiri Olsa63503db2019-07-21 13:23:52 +02003954 struct evlist *evsel_list,
Andi Kleenbfd8f722017-11-17 13:42:58 -08003955 perf_event__handler_t process,
3956 bool is_pipe)
3957{
Jiri Olsa32dcd022019-07-21 13:23:51 +02003958 struct evsel *counter;
Andi Kleenbfd8f722017-11-17 13:42:58 -08003959 int err;
3960
3961 /*
3962 * Synthesize other events stuff not carried within
3963 * attr event - unit, scale, name
3964 */
3965 evlist__for_each_entry(evsel_list, counter) {
3966 if (!counter->supported)
3967 continue;
3968
3969 /*
3970 * Synthesize unit and scale only if it's defined.
3971 */
3972 if (has_unit(counter)) {
3973 err = perf_event__synthesize_event_update_unit(tool, counter, process);
3974 if (err < 0) {
3975 pr_err("Couldn't synthesize evsel unit.\n");
3976 return err;
3977 }
3978 }
3979
3980 if (has_scale(counter)) {
3981 err = perf_event__synthesize_event_update_scale(tool, counter, process);
3982 if (err < 0) {
3983 pr_err("Couldn't synthesize evsel counter.\n");
3984 return err;
3985 }
3986 }
3987
3988 if (counter->own_cpus) {
3989 err = perf_event__synthesize_event_update_cpus(tool, counter, process);
3990 if (err < 0) {
3991 pr_err("Couldn't synthesize evsel cpus.\n");
3992 return err;
3993 }
3994 }
3995
3996 /*
3997 * Name is needed only for pipe output,
3998 * perf.data carries event names.
3999 */
4000 if (is_pipe) {
4001 err = perf_event__synthesize_event_update_name(tool, counter, process);
4002 if (err < 0) {
4003 pr_err("Couldn't synthesize evsel name.\n");
4004 return err;
4005 }
4006 }
4007 }
4008 return 0;
4009}
4010
/*
 * Handle a PERF_RECORD_HEADER_ATTR event coming off a (pipe) stream:
 * create an evsel for the attr it carries and register all the sample
 * ids that follow the attr in the event payload.
 *
 * Creates *pevlist on first use; the new evsel is owned by the evlist.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct evsel *evsel;
	struct evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	/* Ownership of evsel transfers to the evlist here. */
	evlist__add(evlist, evsel);

	/* The ids trail the attr: total size minus the offset of attr.id. */
	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
	}

	return 0;
}
Tom Zanussicd19a032010-04-01 23:59:20 -05004048
Jiri Olsaffe777252015-10-25 15:51:36 +01004049int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
4050 union perf_event *event,
Jiri Olsa63503db2019-07-21 13:23:52 +02004051 struct evlist **pevlist)
Jiri Olsaffe777252015-10-25 15:51:36 +01004052{
4053 struct event_update_event *ev = &event->event_update;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01004054 struct event_update_event_scale *ev_scale;
Jiri Olsa86ebb092015-10-25 15:51:40 +01004055 struct event_update_event_cpus *ev_cpus;
Jiri Olsa63503db2019-07-21 13:23:52 +02004056 struct evlist *evlist;
Jiri Olsa32dcd022019-07-21 13:23:51 +02004057 struct evsel *evsel;
Jiri Olsaf8548392019-07-21 13:23:49 +02004058 struct perf_cpu_map *map;
Jiri Olsaffe777252015-10-25 15:51:36 +01004059
4060 if (!pevlist || *pevlist == NULL)
4061 return -EINVAL;
4062
4063 evlist = *pevlist;
4064
4065 evsel = perf_evlist__id2evsel(evlist, ev->id);
4066 if (evsel == NULL)
4067 return -EINVAL;
4068
Jiri Olsaa6e52812015-10-25 15:51:37 +01004069 switch (ev->type) {
4070 case PERF_EVENT_UPDATE__UNIT:
4071 evsel->unit = strdup(ev->data);
Jiri Olsadaeecbc2015-10-25 15:51:38 +01004072 break;
Jiri Olsa802c9042015-10-25 15:51:39 +01004073 case PERF_EVENT_UPDATE__NAME:
4074 evsel->name = strdup(ev->data);
4075 break;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01004076 case PERF_EVENT_UPDATE__SCALE:
4077 ev_scale = (struct event_update_event_scale *) ev->data;
4078 evsel->scale = ev_scale->scale;
Arnaldo Carvalho de Melo8434a2e2017-02-08 21:57:22 -03004079 break;
Jiri Olsa86ebb092015-10-25 15:51:40 +01004080 case PERF_EVENT_UPDATE__CPUS:
4081 ev_cpus = (struct event_update_event_cpus *) ev->data;
4082
4083 map = cpu_map__new_data(&ev_cpus->cpus);
4084 if (map)
4085 evsel->own_cpus = map;
4086 else
4087 pr_err("failed to get event_update cpus\n");
Jiri Olsaa6e52812015-10-25 15:51:37 +01004088 default:
4089 break;
4090 }
4091
Jiri Olsaffe777252015-10-25 15:51:36 +01004092 return 0;
4093}
4094
/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA event and stream the
 * tracing data (tracefs format/event files) after it into @fd, which is
 * a pipe.
 *
 * Returns the u64-aligned size of the tracing data written (the amount a
 * reader must consume after the event), or -1 on failure to collect the
 * tracing data or to write the alignment padding.
 */
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->core.entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	/* Readers consume u64-aligned chunks; advertise the padded size. */
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	/* Emit the header event first, then the payload below. */
	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}
4143
/*
 * Handle a PERF_RECORD_HEADER_TRACING_DATA event: parse the tracing data
 * that follows it in the perf.data stream, skip (and optionally repipe)
 * the u64-alignment padding, and prepare the tracepoint evsels.
 *
 * Returns the number of bytes consumed after the event header
 * (size_read + padding), or -1 on read/repipe/size-mismatch errors.
 */
int perf_event__process_tracing_data(struct perf_session *session,
				     union perf_event *event)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	/* The payload was written u64-aligned; consume the slack too. */
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	/* In repipe mode, forward the padding so the output stays aligned. */
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	/* What trace_report() consumed must match the advertised size. */
	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}
Tom Zanussic7929e42010-04-01 23:59:22 -05004182
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02004183int perf_event__synthesize_build_id(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02004184 struct dso *pos, u16 misc,
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02004185 perf_event__handler_t process,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02004186 struct machine *machine)
Tom Zanussic7929e42010-04-01 23:59:22 -05004187{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02004188 union perf_event ev;
Tom Zanussic7929e42010-04-01 23:59:22 -05004189 size_t len;
4190 int err = 0;
4191
4192 if (!pos->hit)
4193 return err;
4194
4195 memset(&ev, 0, sizeof(ev));
4196
4197 len = pos->long_name_len + 1;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03004198 len = PERF_ALIGN(len, NAME_ALIGN);
Tom Zanussic7929e42010-04-01 23:59:22 -05004199 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
4200 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
4201 ev.build_id.header.misc = misc;
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -03004202 ev.build_id.pid = machine->pid;
Tom Zanussic7929e42010-04-01 23:59:22 -05004203 ev.build_id.header.size = sizeof(ev.build_id) + len;
4204 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
4205
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02004206 err = process(tool, &ev, NULL, machine);
Tom Zanussic7929e42010-04-01 23:59:22 -05004207
4208 return err;
4209}
4210
Jiri Olsa89f16882018-09-13 14:54:03 +02004211int perf_event__process_build_id(struct perf_session *session,
4212 union perf_event *event)
Tom Zanussic7929e42010-04-01 23:59:22 -05004213{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02004214 __event_process_build_id(&event->build_id,
4215 event->build_id.filename,
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08004216 session);
Tom Zanussic7929e42010-04-01 23:59:22 -05004217 return 0;
4218}