// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "util.h"
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/stringify.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>

#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "memswap.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
#include "time-utils.h"
#include "units.h"

#include "sane_ctype.h"

/*
 * magic2 = "PERFILE2"
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
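/*
 * For example: written on a little-endian host, the eight bytes of
 * __perf_magic2 land in the file as 'P','E','R','F','I','L','E','2'.
 * A reader of the opposite endianness sees __perf_magic2_sw instead
 * and knows it has to byte-swap the rest of the header.
 */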

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

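/*
 * A feat_fd writes either straight to a file descriptor (buf == NULL) or
 * into a growing in-memory buffer (buf != NULL), which is how the feature
 * headers are carried as synthesized events in pipe mode; do_write() and
 * __do_read() below pick the backend based on ff->buf.
 */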
struct feat_fd {
	struct perf_header *ph;
	int fd;
	void *buf;	/* Either buf != NULL or fd >= 0 */
	ssize_t offset;
	size_t size;
	struct perf_evsel *events;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

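/*
 * In-memory counterpart of __do_write_fd(): the buffer is doubled until the
 * record fits, but never grows past the u16 size limit that
 * struct perf_event_header imposes on a synthesized feature event.
 */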
static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}

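/*
 * Bitmaps are stored as a u64 bit count followed by the bitmap contents,
 * written one u64 word at a time so the reader (do_read_bitmap() below)
 * can byte-swap each word on a cross-endian perf.data file.
 */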
/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

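/*
 * Strings go on file as a u32 length (NUL included, rounded up to
 * NAME_ALIGN, which is what string_size() above accounts for) followed by
 * the string itself, zero-padded out to that aligned length.
 */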
/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}

static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}

static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}

static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);
	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);
	return 0;
}

static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}

/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_alloc(size);
	if (!set)
		return -ENOMEM;

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			free(set);
			return ret;
		}
	}

	*pset  = set;
	*psize = size;
	return 0;
}

static int write_tracing_data(struct feat_fd *ff,
			      struct perf_evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->entries);
}

static int write_build_id(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}

static int write_version(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}

static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_cpudesc(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	const char *cpuinfo_procs[] = CPUINFO_PROC;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}


static int write_nrcpus(struct feat_fd *ff,
			struct perf_evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(ff, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(ff, &nra, sizeof(nra));
}

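/*
 * EVENT_DESC layout: a u32 count of events and a u32 sizeof(perf_event_attr),
 * then for each event its attr struct, a u32 number of ids, the event name
 * string and the u64 id array.
 */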
static int write_event_desc(struct feat_fd *ff,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cmdline(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	char buf[MAXPATHLEN];
	u32 n;
	int i, ret;

	/* actual path to perf binary */
	ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

struct cpu_topo {
	u32 cpu_nr;
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};

static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}

static void free_cpu_topo(struct cpu_topo *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0 ; i < tp->core_sib; i++)
		zfree(&tp->core_siblings[i]);

	for (i = 0 ; i < tp->thread_sib; i++)
		zfree(&tp->thread_siblings[i]);

	free(tp);
}

static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	tp->cpu_nr = nr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}

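/*
 * CPU_TOPOLOGY layout: a u32 count of core-sibling lists followed by the
 * list strings, a u32 count of thread-sibling lists followed by those
 * strings, and then the core id and socket id of every available CPU.
 */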
static int write_cpu_topology(struct feat_fd *ff,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_topo *tp;
	u32 i;
	int ret, j;

	tp = build_cpu_topology();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}
done:
	free_cpu_topo(tp);
	return ret;
}



static int write_total_mem(struct feat_fd *ff,
			   struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_topo_node(struct feat_fd *ff, int node)
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	FILE *fp;
	u64 mem_total, mem_free, mem;
	int ret = -1;

	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
			goto done;
		if (!strcmp(field, "MemTotal:"))
			mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			mem_free = mem;
	}

	fclose(fp);
	fp = NULL;

	ret = do_write(ff, &mem_total, sizeof(u64));
	if (ret)
		goto done;

	ret = do_write(ff, &mem_free, sizeof(u64));
	if (ret)
		goto done;

	ret = -1;
	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

	fp = fopen(str, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	ret = do_write_string(ff, buf);
done:
	free(buf);
	if (fp)
		fclose(fp);
	return ret;
}

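/*
 * NUMA_TOPOLOGY layout: a u32 count of online nodes, then for each node its
 * u32 node number followed by the u64 MemTotal, u64 MemFree and cpulist
 * string that write_topo_node() emits.
 */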
static int write_numa_topology(struct feat_fd *ff,
			       struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(ff, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(ff, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return ret;
}

/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

static int write_pmu_mappings(struct feat_fd *ff,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

	/*
	 * Do a first pass to count the number of PMUs, avoiding lseek() so
	 * that this also works in pipe mode.
	 */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;

		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(struct feat_fd *ff,
			    struct perf_evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct perf_evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
 */
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
	return NULL;
}

/* Return zero when the cpuid from the mapfile.csv matches the
 * cpuid string generated on this platform.
 * Otherwise return non-zero.
 */
int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
		/* Warn that the mapfile entry is not a valid regular expression. */
		pr_info("Invalid regular expression %s\n", mapcpuid);
		return 1;
	}

	match = !regexec(&re, cpuid, 1, pmatch, 0);
	regfree(&re);
	if (match) {
		size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);

		/* Verify the entire string matched. */
		if (match_len == strlen(cpuid))
			return 0;
	}
	return 1;
}

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}

static int write_cpuid(struct feat_fd *ff,
		       struct perf_evlist *evlist __maybe_unused)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (ret)
		return -1;

	return do_write_string(ff, buffer);
}

static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_auxtrace(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}

static int write_clockid(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write(ff, &ff->ph->env.clockid_res_ns,
			sizeof(ff->ph->env.clockid_res_ns));
}

static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}

static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = rtrim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		free(cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = rtrim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		free(cache->map);
		free(cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = rtrim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}

#define MAX_CACHES 2000

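/*
 * CACHE layout: u32 version (currently 1) and u32 number of distinct cache
 * levels, then per level the u32 fields level, line_size, sets and ways
 * followed by the type, size and shared-cpu-map strings.
 */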
static int write_cache(struct feat_fd *ff,
		       struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)						\
			ret = do_write(ff, &c->v, sizeof(u32));		\
			if (ret < 0)					\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

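/*
 * SAMPLE_TIME layout: two u64 timestamps, the time of the first and of the
 * last sample recorded in the session.
 */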
static int write_sample_time(struct feat_fd *ff,
			     struct perf_evlist *evlist)
{
	int ret;

	ret = do_write(ff, &evlist->first_sample_time,
		       sizeof(evlist->first_sample_time));
	if (ret < 0)
		return ret;

	return do_write(ff, &evlist->last_sample_time,
			sizeof(evlist->last_sample_time));
}


static int memory_node__read(struct memory_node *n, unsigned long idx)
{
	unsigned int phys, size = 0;
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;

#define for_each_memory(mem, dir)					\
	while ((ent = readdir(dir)))					\
		if (strcmp(ent->d_name, ".") &&				\
		    strcmp(ent->d_name, "..") &&			\
		    sscanf(ent->d_name, "memory%u", &mem) == 1)

	scnprintf(path, PATH_MAX,
		  "%s/devices/system/node/node%lu",
		  sysfs__mountpoint(), idx);

	dir = opendir(path);
	if (!dir) {
		pr_warning("failed: can't open memory sysfs data\n");
		return -1;
	}

	for_each_memory(phys, dir) {
		size = max(phys, size);
	}

	size++;

	n->set = bitmap_alloc(size);
	if (!n->set) {
		closedir(dir);
		return -ENOMEM;
	}

	n->node = idx;
	n->size = size;

	rewinddir(dir);

	for_each_memory(phys, dir) {
		set_bit(phys, n->set);
	}

	closedir(dir);
	return 0;
}

static int memory_node__sort(const void *a, const void *b)
{
	const struct memory_node *na = a;
	const struct memory_node *nb = b;

	return na->node - nb->node;
}

static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
{
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;
	u64 cnt = 0;
	int ret = 0;

	scnprintf(path, PATH_MAX, "%s/devices/system/node/",
		  sysfs__mountpoint());

	dir = opendir(path);
	if (!dir) {
		pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
			  __func__, path);
		return -1;
	}

	while (!ret && (ent = readdir(dir))) {
		unsigned int idx;
		int r;

		if (!strcmp(ent->d_name, ".") ||
		    !strcmp(ent->d_name, ".."))
			continue;

		r = sscanf(ent->d_name, "node%u", &idx);
		if (r != 1)
			continue;

		if (WARN_ONCE(cnt >= size,
			"failed to write MEM_TOPOLOGY, way too many nodes\n"))
			return -1;

		ret = memory_node__read(&nodes[cnt++], idx);
	}

	*cntp = cnt;
	closedir(dir);

	if (!ret)
		qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);

	return ret;
}

#define MAX_MEMORY_NODES 2000

/*
 * The MEM_TOPOLOGY feature holds the physical memory map for every node in
 * the system. The format of the data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store a map of physical indexes:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belong to the node
 */
1414static int write_mem_topology(struct feat_fd *ff __maybe_unused,
1415 struct perf_evlist *evlist __maybe_unused)
1416{
1417 static struct memory_node nodes[MAX_MEMORY_NODES];
1418 u64 bsize, version = 1, i, nr;
1419 int ret;
1420
1421 ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
1422 (unsigned long long *) &bsize);
1423 if (ret)
1424 return ret;
1425
1426 ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
1427 if (ret)
1428 return ret;
1429
1430 ret = do_write(ff, &version, sizeof(version));
1431 if (ret < 0)
1432 goto out;
1433
1434 ret = do_write(ff, &bsize, sizeof(bsize));
1435 if (ret < 0)
1436 goto out;
1437
1438 ret = do_write(ff, &nr, sizeof(nr));
1439 if (ret < 0)
1440 goto out;
1441
1442 for (i = 0; i < nr; i++) {
1443 struct memory_node *n = &nodes[i];
1444
1445 #define _W(v) \
1446 ret = do_write(ff, &n->v, sizeof(n->v)); \
1447 if (ret < 0) \
1448 goto out;
1449
1450 _W(node)
1451 _W(size)
1452
1453 #undef _W
1454
1455 ret = do_write_bitmap(ff, n->set, n->size);
1456 if (ret < 0)
1457 goto out;
1458 }
1459
1460out:
1461 return ret;
1462}
1463
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001464static void print_hostname(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001465{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001466 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001467}
1468
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001469static void print_osrelease(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001470{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001471 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001472}
1473
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001474static void print_arch(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001475{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001476 fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001477}
1478
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001479static void print_cpudesc(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001480{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001481 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001482}
1483
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001484static void print_nrcpus(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001485{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001486 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1487 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001488}
1489
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001490static void print_version(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001491{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001492 fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001493}
1494
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001495static void print_cmdline(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001496{
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001497 int nr, i;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001498
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001499 nr = ff->ph->env.nr_cmdline;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001500
1501 fprintf(fp, "# cmdline : ");
1502
Alexey Budankovf92da712018-06-04 09:50:56 +03001503 for (i = 0; i < nr; i++) {
1504 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1505 if (!argv_i) {
1506 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1507 } else {
1508 char *mem = argv_i;
1509 do {
1510 char *quote = strchr(argv_i, '\'');
1511 if (!quote)
1512 break;
1513 *quote++ = '\0';
1514 fprintf(fp, "%s\\\'", argv_i);
1515 argv_i = quote;
1516 } while (1);
1517 fprintf(fp, "%s ", argv_i);
1518 free(mem);
1519 }
1520 }
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001521 fputc('\n', fp);
1522}
1523
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001524static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001525{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001526 struct perf_header *ph = ff->ph;
1527 int cpu_nr = ph->env.nr_cpus_avail;
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001528 int nr, i;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001529 char *str;
1530
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001531 nr = ph->env.nr_sibling_cores;
1532 str = ph->env.sibling_cores;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001533
1534 for (i = 0; i < nr; i++) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001535 fprintf(fp, "# sibling cores : %s\n", str);
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001536 str += strlen(str) + 1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001537 }
1538
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001539 nr = ph->env.nr_sibling_threads;
1540 str = ph->env.sibling_threads;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001541
1542 for (i = 0; i < nr; i++) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001543 fprintf(fp, "# sibling threads : %s\n", str);
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001544 str += strlen(str) + 1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001545 }
Kan Liang2bb00d22015-09-01 09:58:12 -04001546
1547 if (ph->env.cpu != NULL) {
1548 for (i = 0; i < cpu_nr; i++)
1549 fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1550 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1551 } else
1552 fprintf(fp, "# Core ID and Socket ID information is not available\n");
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001553}
1554
Alexey Budankovcf790512018-10-09 17:36:24 +03001555static void print_clockid(struct feat_fd *ff, FILE *fp)
1556{
1557 fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
1558 ff->ph->env.clockid_res_ns * 1000);
1559}
1560
Robert Richter4e1b9c62012-08-16 21:10:22 +02001561static void free_event_desc(struct perf_evsel *events)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001562{
Robert Richter4e1b9c62012-08-16 21:10:22 +02001563 struct perf_evsel *evsel;
1564
1565 if (!events)
1566 return;
1567
1568 for (evsel = events; evsel->attr.size; evsel++) {
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -03001569 zfree(&evsel->name);
1570 zfree(&evsel->id);
Robert Richter4e1b9c62012-08-16 21:10:22 +02001571 }
1572
1573 free(events);
1574}
1575
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001576static struct perf_evsel *read_event_desc(struct feat_fd *ff)
Robert Richter4e1b9c62012-08-16 21:10:22 +02001577{
1578 struct perf_evsel *evsel, *events = NULL;
1579 u64 *id;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001580 void *buf = NULL;
Stephane Eranian62db9062012-02-09 23:21:07 +01001581 u32 nre, sz, nr, i, j;
Stephane Eranian62db9062012-02-09 23:21:07 +01001582 size_t msz;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001583
1584 /* number of events */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001585 if (do_read_u32(ff, &nre))
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001586 goto error;
1587
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001588 if (do_read_u32(ff, &sz))
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001589 goto error;
1590
Stephane Eranian62db9062012-02-09 23:21:07 +01001591	/* buffer to hold the on-file attr struct */
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001592 buf = malloc(sz);
1593 if (!buf)
1594 goto error;
1595
Robert Richter4e1b9c62012-08-16 21:10:22 +02001596 /* the last event terminates with evsel->attr.size == 0: */
1597 events = calloc(nre + 1, sizeof(*events));
1598 if (!events)
1599 goto error;
1600
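	/* never copy more of the on-file attr than this tool's struct holds */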
1601 msz = sizeof(evsel->attr);
Jiri Olsa9fafd982012-03-20 19:15:39 +01001602 if (sz < msz)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001603 msz = sz;
1604
Robert Richter4e1b9c62012-08-16 21:10:22 +02001605 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1606 evsel->idx = i;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001607
Stephane Eranian62db9062012-02-09 23:21:07 +01001608 /*
1609 * must read entire on-file attr struct to
1610 * sync up with layout.
1611 */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001612 if (__do_read(ff, buf, sz))
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001613 goto error;
1614
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001615 if (ff->ph->needs_swap)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001616 perf_event__attr_swap(buf);
1617
Robert Richter4e1b9c62012-08-16 21:10:22 +02001618 memcpy(&evsel->attr, buf, msz);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001619
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001620 if (do_read_u32(ff, &nr))
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001621 goto error;
1622
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001623 if (ff->ph->needs_swap)
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001624 evsel->needs_swap = true;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001625
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001626 evsel->name = do_read_string(ff);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07001627 if (!evsel->name)
1628 goto error;
Robert Richter4e1b9c62012-08-16 21:10:22 +02001629
1630 if (!nr)
1631 continue;
1632
1633 id = calloc(nr, sizeof(*id));
1634 if (!id)
1635 goto error;
1636 evsel->ids = nr;
1637 evsel->id = id;
1638
1639 for (j = 0 ; j < nr; j++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07001640 if (do_read_u64(ff, id))
Robert Richter4e1b9c62012-08-16 21:10:22 +02001641 goto error;
Robert Richter4e1b9c62012-08-16 21:10:22 +02001642 id++;
1643 }
1644 }
1645out:
Arnaldo Carvalho de Melo04662522013-12-26 17:41:15 -03001646 free(buf);
Robert Richter4e1b9c62012-08-16 21:10:22 +02001647 return events;
1648error:
Markus Elfring4cc97612015-06-25 17:12:32 +02001649 free_event_desc(events);
Robert Richter4e1b9c62012-08-16 21:10:22 +02001650 events = NULL;
1651 goto out;
1652}
1653
Peter Zijlstra2c5e8c52015-04-07 11:09:54 +02001654static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
Arnaldo Carvalho de Melo03536312017-06-16 12:18:27 -03001655 void *priv __maybe_unused)
Peter Zijlstra2c5e8c52015-04-07 11:09:54 +02001656{
1657 return fprintf(fp, ", %s = %s", name, val);
1658}
1659
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001660static void print_event_desc(struct feat_fd *ff, FILE *fp)
Robert Richter4e1b9c62012-08-16 21:10:22 +02001661{
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07001662 struct perf_evsel *evsel, *events;
Robert Richter4e1b9c62012-08-16 21:10:22 +02001663 u32 j;
1664 u64 *id;
1665
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07001666 if (ff->events)
1667 events = ff->events;
1668 else
1669 events = read_event_desc(ff);
1670
Robert Richter4e1b9c62012-08-16 21:10:22 +02001671 if (!events) {
1672 fprintf(fp, "# event desc: not available or unable to read\n");
1673 return;
1674 }
1675
1676 for (evsel = events; evsel->attr.size; evsel++) {
	1677		fprintf(fp, "# event : name = %s", evsel->name);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001678
Robert Richter4e1b9c62012-08-16 21:10:22 +02001679 if (evsel->ids) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001680 fprintf(fp, ", id = {");
Robert Richter4e1b9c62012-08-16 21:10:22 +02001681 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1682 if (j)
1683 fputc(',', fp);
1684 fprintf(fp, " %"PRIu64, *id);
1685 }
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001686 fprintf(fp, " }");
Robert Richter4e1b9c62012-08-16 21:10:22 +02001687 }
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001688
Peter Zijlstra2c5e8c52015-04-07 11:09:54 +02001689 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
Robert Richter4e1b9c62012-08-16 21:10:22 +02001690
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001691 fputc('\n', fp);
1692 }
Robert Richter4e1b9c62012-08-16 21:10:22 +02001693
1694 free_event_desc(events);
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07001695 ff->events = NULL;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001696}
1697
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001698static void print_total_mem(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001699{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001700 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001701}
1702
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001703static void print_numa_topology(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001704{
Jiri Olsac60da222016-07-04 14:16:20 +02001705 int i;
1706 struct numa_node *n;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001707
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001708 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1709 n = &ff->ph->env.numa_nodes[i];
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001710
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001711 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1712 " free = %"PRIu64" kB\n",
Jiri Olsac60da222016-07-04 14:16:20 +02001713 n->node, n->mem_total, n->mem_free);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001714
Jiri Olsac60da222016-07-04 14:16:20 +02001715 fprintf(fp, "# node%u cpu list : ", n->node);
1716 cpu_map__fprintf(n->map, fp);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001717 }
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001718}
1719
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001720static void print_cpuid(struct feat_fd *ff, FILE *fp)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001721{
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001722 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001723}
1724
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001725static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
Stephane Eranian330aa672012-03-08 23:47:46 +01001726{
1727 fprintf(fp, "# contains samples with branch stack\n");
1728}
1729
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001730static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
Adrian Hunter4025ea42015-04-09 18:53:41 +03001731{
1732 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1733}
1734
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001735static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
Jiri Olsaffa517a2015-10-25 15:51:43 +01001736{
1737 fprintf(fp, "# contains stat data\n");
1738}
1739
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001740static void print_cache(struct feat_fd *ff, FILE *fp)
Jiri Olsa720e98b2016-02-16 16:01:43 +01001741{
1742 int i;
1743
1744 fprintf(fp, "# CPU cache info:\n");
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001745 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
Jiri Olsa720e98b2016-02-16 16:01:43 +01001746 fprintf(fp, "# ");
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001747 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
Jiri Olsa720e98b2016-02-16 16:01:43 +01001748 }
1749}
1750
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001751static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
Robert Richter50a96672012-08-16 21:10:24 +02001752{
1753 const char *delimiter = "# pmu mappings: ";
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001754 char *str, *tmp;
Robert Richter50a96672012-08-16 21:10:24 +02001755 u32 pmu_num;
1756 u32 type;
1757
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001758 pmu_num = ff->ph->env.nr_pmu_mappings;
Robert Richter50a96672012-08-16 21:10:24 +02001759 if (!pmu_num) {
1760 fprintf(fp, "# pmu mappings: not available\n");
1761 return;
1762 }
1763
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001764 str = ff->ph->env.pmu_mappings;
Namhyung Kimbe4a2de2012-09-05 14:02:49 +09001765
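	/* pmu_mappings holds "<type>:<name>" entries, each NUL terminated */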
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001766 while (pmu_num) {
1767 type = strtoul(str, &tmp, 0);
1768 if (*tmp != ':')
1769 goto error;
1770
1771 str = tmp + 1;
1772 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1773
Robert Richter50a96672012-08-16 21:10:24 +02001774 delimiter = ", ";
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001775 str += strlen(str) + 1;
1776 pmu_num--;
Robert Richter50a96672012-08-16 21:10:24 +02001777 }
1778
1779 fprintf(fp, "\n");
1780
1781 if (!pmu_num)
1782 return;
1783error:
1784 fprintf(fp, "# pmu mappings: unable to read\n");
1785}
1786
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001787static void print_group_desc(struct feat_fd *ff, FILE *fp)
Namhyung Kima8bb5592013-01-22 18:09:31 +09001788{
1789 struct perf_session *session;
1790 struct perf_evsel *evsel;
1791 u32 nr = 0;
1792
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07001793 session = container_of(ff->ph, struct perf_session, header);
Namhyung Kima8bb5592013-01-22 18:09:31 +09001794
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001795 evlist__for_each_entry(session->evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09001796 if (perf_evsel__is_group_leader(evsel) &&
1797 evsel->nr_members > 1) {
1798 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1799 perf_evsel__name(evsel));
1800
1801 nr = evsel->nr_members - 1;
1802 } else if (nr) {
1803 fprintf(fp, ",%s", perf_evsel__name(evsel));
1804
1805 if (--nr == 0)
1806 fprintf(fp, "}\n");
1807 }
1808 }
1809}
1810
Jin Yao60115182017-12-08 21:13:41 +08001811static void print_sample_time(struct feat_fd *ff, FILE *fp)
1812{
1813 struct perf_session *session;
1814 char time_buf[32];
1815 double d;
1816
1817 session = container_of(ff->ph, struct perf_session, header);
1818
1819 timestamp__scnprintf_usec(session->evlist->first_sample_time,
1820 time_buf, sizeof(time_buf));
1821 fprintf(fp, "# time of first sample : %s\n", time_buf);
1822
1823 timestamp__scnprintf_usec(session->evlist->last_sample_time,
1824 time_buf, sizeof(time_buf));
1825 fprintf(fp, "# time of last sample : %s\n", time_buf);
1826
1827 d = (double)(session->evlist->last_sample_time -
1828 session->evlist->first_sample_time) / NSEC_PER_MSEC;
1829
1830 fprintf(fp, "# sample duration : %10.3f ms\n", d);
1831}
1832
Jiri Olsae2091ce2018-03-07 16:50:08 +01001833static void memory_node__fprintf(struct memory_node *n,
1834 unsigned long long bsize, FILE *fp)
1835{
1836 char buf_map[100], buf_size[50];
1837 unsigned long long size;
1838
1839 size = bsize * bitmap_weight(n->set, n->size);
1840 unit_number__scnprintf(buf_size, 50, size);
1841
1842 bitmap_scnprintf(n->set, n->size, buf_map, 100);
1843 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
1844}
1845
1846static void print_mem_topology(struct feat_fd *ff, FILE *fp)
1847{
1848 struct memory_node *nodes;
1849 int i, nr;
1850
1851 nodes = ff->ph->env.memory_nodes;
1852 nr = ff->ph->env.nr_memory_nodes;
1853
1854 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
1855 nr, ff->ph->env.memory_bsize);
1856
1857 for (i = 0; i < nr; i++) {
1858 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
1859 }
1860}
1861
Robert Richter08d95bd2012-02-10 15:41:55 +01001862static int __event_process_build_id(struct build_id_event *bev,
1863 char *filename,
1864 struct perf_session *session)
1865{
1866 int err = -1;
Robert Richter08d95bd2012-02-10 15:41:55 +01001867 struct machine *machine;
Wang Nan1f121b02015-06-03 08:52:21 +00001868 u16 cpumode;
Robert Richter08d95bd2012-02-10 15:41:55 +01001869 struct dso *dso;
1870 enum dso_kernel_type dso_type;
1871
1872 machine = perf_session__findnew_machine(session, bev->pid);
1873 if (!machine)
1874 goto out;
1875
Wang Nan1f121b02015-06-03 08:52:21 +00001876 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
Robert Richter08d95bd2012-02-10 15:41:55 +01001877
Wang Nan1f121b02015-06-03 08:52:21 +00001878 switch (cpumode) {
Robert Richter08d95bd2012-02-10 15:41:55 +01001879 case PERF_RECORD_MISC_KERNEL:
1880 dso_type = DSO_TYPE_KERNEL;
Robert Richter08d95bd2012-02-10 15:41:55 +01001881 break;
1882 case PERF_RECORD_MISC_GUEST_KERNEL:
1883 dso_type = DSO_TYPE_GUEST_KERNEL;
Robert Richter08d95bd2012-02-10 15:41:55 +01001884 break;
1885 case PERF_RECORD_MISC_USER:
1886 case PERF_RECORD_MISC_GUEST_USER:
1887 dso_type = DSO_TYPE_USER;
Robert Richter08d95bd2012-02-10 15:41:55 +01001888 break;
1889 default:
1890 goto out;
1891 }
1892
Arnaldo Carvalho de Meloaa7cc2a2015-05-29 11:31:12 -03001893 dso = machine__findnew_dso(machine, filename);
Robert Richter08d95bd2012-02-10 15:41:55 +01001894 if (dso != NULL) {
Masami Hiramatsub5d8bbe2016-05-11 22:51:59 +09001895 char sbuild_id[SBUILD_ID_SIZE];
Robert Richter08d95bd2012-02-10 15:41:55 +01001896
1897 dso__set_build_id(dso, &bev->build_id);
1898
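		/*
		 * For kernel/guest-kernel dsos, check whether the filename is
		 * really a kernel module and record the module info if so;
		 * otherwise mark the dso with the kernel type decided above.
		 */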
Namhyung Kim1deec1b2017-05-31 21:01:03 +09001899 if (dso_type != DSO_TYPE_USER) {
1900 struct kmod_path m = { .name = NULL, };
1901
1902 if (!kmod_path__parse_name(&m, filename) && m.kmod)
Namhyung Kim6b335e82017-05-31 21:01:04 +09001903 dso__set_module_info(dso, &m, machine);
Namhyung Kim1deec1b2017-05-31 21:01:03 +09001904 else
1905 dso->kernel = dso_type;
1906
1907 free(m.name);
1908 }
Robert Richter08d95bd2012-02-10 15:41:55 +01001909
1910 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1911 sbuild_id);
1912 pr_debug("build id event received for %s: %s\n",
1913 dso->long_name, sbuild_id);
Arnaldo Carvalho de Melod3a7c482015-06-02 11:53:26 -03001914 dso__put(dso);
Robert Richter08d95bd2012-02-10 15:41:55 +01001915 }
1916
1917 err = 0;
1918out:
1919 return err;
1920}
1921
1922static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1923 int input, u64 offset, u64 size)
1924{
1925 struct perf_session *session = container_of(header, struct perf_session, header);
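	/* on-file record layout used before the pid field was added */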
1926 struct {
1927 struct perf_event_header header;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03001928 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
Robert Richter08d95bd2012-02-10 15:41:55 +01001929 char filename[0];
1930 } old_bev;
1931 struct build_id_event bev;
1932 char filename[PATH_MAX];
1933 u64 limit = offset + size;
1934
1935 while (offset < limit) {
1936 ssize_t len;
1937
Namhyung Kim5323f602012-12-17 15:38:54 +09001938 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
Robert Richter08d95bd2012-02-10 15:41:55 +01001939 return -1;
1940
1941 if (header->needs_swap)
1942 perf_event_header__bswap(&old_bev.header);
1943
1944 len = old_bev.header.size - sizeof(old_bev);
Namhyung Kim5323f602012-12-17 15:38:54 +09001945 if (readn(input, filename, len) != len)
Robert Richter08d95bd2012-02-10 15:41:55 +01001946 return -1;
1947
1948 bev.header = old_bev.header;
1949
1950 /*
1951 * As the pid is the missing value, we need to fill
	1952	 * it properly. The header.misc value gives us a nice hint.
1953 */
1954 bev.pid = HOST_KERNEL_ID;
1955 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1956 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1957 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1958
1959 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1960 __event_process_build_id(&bev, filename, session);
1961
1962 offset += bev.header.size;
1963 }
1964
1965 return 0;
1966}
1967
1968static int perf_header__read_build_ids(struct perf_header *header,
1969 int input, u64 offset, u64 size)
1970{
1971 struct perf_session *session = container_of(header, struct perf_session, header);
1972 struct build_id_event bev;
1973 char filename[PATH_MAX];
1974 u64 limit = offset + size, orig_offset = offset;
1975 int err = -1;
1976
1977 while (offset < limit) {
1978 ssize_t len;
1979
Namhyung Kim5323f602012-12-17 15:38:54 +09001980 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
Robert Richter08d95bd2012-02-10 15:41:55 +01001981 goto out;
1982
1983 if (header->needs_swap)
1984 perf_event_header__bswap(&bev.header);
1985
1986 len = bev.header.size - sizeof(bev);
Namhyung Kim5323f602012-12-17 15:38:54 +09001987 if (readn(input, filename, len) != len)
Robert Richter08d95bd2012-02-10 15:41:55 +01001988 goto out;
1989 /*
1990 * The a1645ce1 changeset:
1991 *
1992 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1993 *
1994 * Added a field to struct build_id_event that broke the file
1995 * format.
1996 *
1997 * Since the kernel build-id is the first entry, process the
1998 * table using the old format if the well known
1999 * '[kernel.kallsyms]' string for the kernel build-id has the
2000 * first 4 characters chopped off (where the pid_t sits).
2001 */
2002 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2003 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
2004 return -1;
2005 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
2006 }
2007
2008 __event_process_build_id(&bev, filename, session);
2009
2010 offset += bev.header.size;
2011 }
2012 err = 0;
2013out:
2014 return err;
2015}
2016
David Carrillo-Cisnerosdfaa1582017-07-17 21:25:35 -07002017/* Macro for features that simply need to read and store a string. */
2018#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002019static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
David Carrillo-Cisnerosdfaa1582017-07-17 21:25:35 -07002020{\
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002021 ff->ph->env.__feat_env = do_read_string(ff); \
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002022 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
David Carrillo-Cisnerosdfaa1582017-07-17 21:25:35 -07002023}
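/*
 * For illustration, FEAT_PROCESS_STR_FUN(hostname, hostname) below
 * expands to roughly:
 *
 *	static int process_hostname(struct feat_fd *ff, void *data __maybe_unused)
 *	{
 *		ff->ph->env.hostname = do_read_string(ff);
 *		return ff->ph->env.hostname ? 0 : -ENOMEM;
 *	}
 */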
2024
2025FEAT_PROCESS_STR_FUN(hostname, hostname);
2026FEAT_PROCESS_STR_FUN(osrelease, os_release);
2027FEAT_PROCESS_STR_FUN(version, version);
2028FEAT_PROCESS_STR_FUN(arch, arch);
2029FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
2030FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2031
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002032static int process_tracing_data(struct feat_fd *ff, void *data)
Robert Richterf1c67db2012-02-10 15:41:56 +01002033{
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002034 ssize_t ret = trace_report(ff->fd, data, false);
2035
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09002036 return ret < 0 ? -1 : 0;
Robert Richterf1c67db2012-02-10 15:41:56 +01002037}
2038
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002039static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
Robert Richterf1c67db2012-02-10 15:41:56 +01002040{
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002041 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
Robert Richterf1c67db2012-02-10 15:41:56 +01002042 pr_debug("Failed to read buildids, continuing...\n");
2043 return 0;
2044}
2045
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002046static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002047{
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002048 int ret;
2049 u32 nr_cpus_avail, nr_cpus_online;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002050
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002051 ret = do_read_u32(ff, &nr_cpus_avail);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002052 if (ret)
2053 return ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002054
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002055 ret = do_read_u32(ff, &nr_cpus_online);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002056 if (ret)
2057 return ret;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002058 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2059 ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002060 return 0;
2061}
2062
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002063static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002064{
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002065 u64 total_mem;
2066 int ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002067
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002068 ret = do_read_u64(ff, &total_mem);
David Carrillo-Cisneros6200e4942017-07-17 21:25:34 -07002069 if (ret)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002070 return -1;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002071 ff->ph->env.total_mem = (unsigned long long)total_mem;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002072 return 0;
2073}
2074
Robert Richter7c2f7af2012-08-16 21:10:23 +02002075static struct perf_evsel *
2076perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
2077{
2078 struct perf_evsel *evsel;
2079
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002080 evlist__for_each_entry(evlist, evsel) {
Robert Richter7c2f7af2012-08-16 21:10:23 +02002081 if (evsel->idx == idx)
2082 return evsel;
2083 }
2084
2085 return NULL;
2086}
2087
2088static void
Namhyung Kim3d7eb862012-09-24 17:15:01 +09002089perf_evlist__set_event_name(struct perf_evlist *evlist,
2090 struct perf_evsel *event)
Robert Richter7c2f7af2012-08-16 21:10:23 +02002091{
2092 struct perf_evsel *evsel;
2093
2094 if (!event->name)
2095 return;
2096
2097 evsel = perf_evlist__find_by_index(evlist, event->idx);
2098 if (!evsel)
2099 return;
2100
2101 if (evsel->name)
2102 return;
2103
2104 evsel->name = strdup(event->name);
2105}
2106
2107static int
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002108process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
Robert Richter7c2f7af2012-08-16 21:10:23 +02002109{
Namhyung Kim3d7eb862012-09-24 17:15:01 +09002110 struct perf_session *session;
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002111 struct perf_evsel *evsel, *events = read_event_desc(ff);
Robert Richter7c2f7af2012-08-16 21:10:23 +02002112
2113 if (!events)
2114 return 0;
2115
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002116 session = container_of(ff->ph, struct perf_session, header);
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07002117
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002118 if (session->data->is_pipe) {
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07002119		/*
		 * Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode.
		 */
2121 ff->events = events;
2122 }
2123
Robert Richter7c2f7af2012-08-16 21:10:23 +02002124 for (evsel = events; evsel->attr.size; evsel++)
2125 perf_evlist__set_event_name(session->evlist, evsel);
2126
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002127 if (!session->data->is_pipe)
David Carrillo-Cisnerosf9ebdcc2017-07-17 21:25:49 -07002128 free_event_desc(events);
Robert Richter7c2f7af2012-08-16 21:10:23 +02002129
2130 return 0;
2131}
2132
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002133static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002134{
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002135 char *str, *cmdline = NULL, **argv = NULL;
2136 u32 nr, i, len = 0;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002137
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002138 if (do_read_u32(ff, &nr))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002139 return -1;
2140
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002141 ff->ph->env.nr_cmdline = nr;
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002142
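	/*
	 * All arguments are copied into one buffer; argv[] entries point
	 * into it instead of owning separate allocations.
	 */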
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002143 cmdline = zalloc(ff->size + nr + 1);
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002144 if (!cmdline)
2145 return -1;
2146
2147 argv = zalloc(sizeof(char *) * (nr + 1));
2148 if (!argv)
2149 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002150
2151 for (i = 0; i < nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002152 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002153 if (!str)
2154 goto error;
2155
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002156 argv[i] = cmdline + len;
2157 memcpy(argv[i], str, strlen(str) + 1);
2158 len += strlen(str) + 1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002159 free(str);
2160 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002161 ff->ph->env.cmdline = cmdline;
2162 ff->ph->env.cmdline_argv = (const char **) argv;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002163 return 0;
2164
2165error:
Jiri Olsa768dd3f2015-07-21 14:31:31 +02002166 free(argv);
2167 free(cmdline);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002168 return -1;
2169}
2170
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002171static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002172{
Namhyung Kima1ae5652012-09-24 17:14:59 +09002173 u32 nr, i;
2174 char *str;
2175 struct strbuf sb;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002176 int cpu_nr = ff->ph->env.nr_cpus_avail;
Kan Liang2bb00d22015-09-01 09:58:12 -04002177 u64 size = 0;
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002178 struct perf_header *ph = ff->ph;
Thomas Richter01766222018-06-11 09:31:52 +02002179 bool do_core_id_test = true;
Kan Liang2bb00d22015-09-01 09:58:12 -04002180
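	/*
	 * 'size' counts the bytes of this feature section consumed so far;
	 * it is compared against ff->size below to detect headers written
	 * by an old perf that lack the per-cpu core/socket table.
	 */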
2181 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
2182 if (!ph->env.cpu)
2183 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002184
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002185 if (do_read_u32(ff, &nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04002186 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002187
Namhyung Kima1ae5652012-09-24 17:14:59 +09002188 ph->env.nr_sibling_cores = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04002189 size += sizeof(u32);
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002190 if (strbuf_init(&sb, 128) < 0)
2191 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002192
2193 for (i = 0; i < nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002194 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002195 if (!str)
2196 goto error;
2197
2198 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002199 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2200 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04002201 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002202 free(str);
2203 }
2204 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
2205
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002206 if (do_read_u32(ff, &nr))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002207 return -1;
2208
Namhyung Kima1ae5652012-09-24 17:14:59 +09002209 ph->env.nr_sibling_threads = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04002210 size += sizeof(u32);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002211
2212 for (i = 0; i < nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002213 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002214 if (!str)
2215 goto error;
2216
2217 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002218 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2219 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04002220 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002221 free(str);
2222 }
2223 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
Kan Liang2bb00d22015-09-01 09:58:12 -04002224
2225 /*
2226 * The header may be from old perf,
2227 * which doesn't include core id and socket id information.
2228 */
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002229 if (ff->size <= size) {
Kan Liang2bb00d22015-09-01 09:58:12 -04002230 zfree(&ph->env.cpu);
2231 return 0;
2232 }
2233
Thomas Richter01766222018-06-11 09:31:52 +02002234	/* On s390 the socket_id number is not related to the number of CPUs.
	2235	 * The socket_id number might be higher than the number of CPUs.
	2236	 * This depends on the configuration.
	2237	 */
2238 if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
2239 do_core_id_test = false;
2240
Kan Liang2bb00d22015-09-01 09:58:12 -04002241 for (i = 0; i < (u32)cpu_nr; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002242 if (do_read_u32(ff, &nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04002243 goto free_cpu;
2244
Kan Liang2bb00d22015-09-01 09:58:12 -04002245 ph->env.cpu[i].core_id = nr;
2246
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002247 if (do_read_u32(ff, &nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04002248 goto free_cpu;
2249
Thomas Richter01766222018-06-11 09:31:52 +02002250 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
Kan Liang2bb00d22015-09-01 09:58:12 -04002251			pr_debug("socket_id number is too big. "
2252 "You may need to upgrade the perf tool.\n");
2253 goto free_cpu;
2254 }
2255
2256 ph->env.cpu[i].socket_id = nr;
2257 }
2258
Namhyung Kima1ae5652012-09-24 17:14:59 +09002259 return 0;
2260
2261error:
2262 strbuf_release(&sb);
Kan Liang2bb00d22015-09-01 09:58:12 -04002263free_cpu:
2264 zfree(&ph->env.cpu);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002265 return -1;
2266}
2267
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002268static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002269{
Jiri Olsac60da222016-07-04 14:16:20 +02002270 struct numa_node *nodes, *n;
Jiri Olsac60da222016-07-04 14:16:20 +02002271 u32 nr, i;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002272 char *str;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002273
2274 /* nr nodes */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002275 if (do_read_u32(ff, &nr))
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002276 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002277
Jiri Olsac60da222016-07-04 14:16:20 +02002278 nodes = zalloc(sizeof(*nodes) * nr);
2279 if (!nodes)
2280 return -ENOMEM;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002281
2282 for (i = 0; i < nr; i++) {
Jiri Olsac60da222016-07-04 14:16:20 +02002283 n = &nodes[i];
2284
Namhyung Kima1ae5652012-09-24 17:14:59 +09002285 /* node number */
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002286 if (do_read_u32(ff, &n->node))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002287 goto error;
2288
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002289 if (do_read_u64(ff, &n->mem_total))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002290 goto error;
2291
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002292 if (do_read_u64(ff, &n->mem_free))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002293 goto error;
2294
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002295 str = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002296 if (!str)
2297 goto error;
2298
Jiri Olsac60da222016-07-04 14:16:20 +02002299 n->map = cpu_map__new(str);
2300 if (!n->map)
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002301 goto error;
Jiri Olsac60da222016-07-04 14:16:20 +02002302
Namhyung Kima1ae5652012-09-24 17:14:59 +09002303 free(str);
2304 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002305 ff->ph->env.nr_numa_nodes = nr;
2306 ff->ph->env.numa_nodes = nodes;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002307 return 0;
2308
2309error:
Jiri Olsac60da222016-07-04 14:16:20 +02002310 free(nodes);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002311 return -1;
2312}
2313
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002314static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09002315{
Namhyung Kima1ae5652012-09-24 17:14:59 +09002316 char *name;
2317 u32 pmu_num;
2318 u32 type;
2319 struct strbuf sb;
2320
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002321 if (do_read_u32(ff, &pmu_num))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002322 return -1;
2323
Namhyung Kima1ae5652012-09-24 17:14:59 +09002324 if (!pmu_num) {
2325 pr_debug("pmu mappings not available\n");
2326 return 0;
2327 }
2328
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002329 ff->ph->env.nr_pmu_mappings = pmu_num;
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002330 if (strbuf_init(&sb, 128) < 0)
2331 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002332
2333 while (pmu_num) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002334 if (do_read_u32(ff, &type))
Namhyung Kima1ae5652012-09-24 17:14:59 +09002335 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002336
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002337 name = do_read_string(ff);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002338 if (!name)
2339 goto error;
2340
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002341 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2342 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002343 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002344 if (strbuf_add(&sb, "", 1) < 0)
2345 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002346
Kan Liange0838e02015-09-10 11:03:05 -03002347 if (!strcmp(name, "msr"))
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002348 ff->ph->env.msr_pmu_type = type;
Kan Liange0838e02015-09-10 11:03:05 -03002349
Namhyung Kima1ae5652012-09-24 17:14:59 +09002350 free(name);
2351 pmu_num--;
2352 }
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002353 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
Namhyung Kima1ae5652012-09-24 17:14:59 +09002354 return 0;
2355
2356error:
2357 strbuf_release(&sb);
2358 return -1;
2359}
2360
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002361static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
Namhyung Kima8bb5592013-01-22 18:09:31 +09002362{
2363 size_t ret = -1;
2364 u32 i, nr, nr_groups;
2365 struct perf_session *session;
2366 struct perf_evsel *evsel, *leader = NULL;
2367 struct group_desc {
2368 char *name;
2369 u32 leader_idx;
2370 u32 nr_members;
2371 } *desc;
2372
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002373 if (do_read_u32(ff, &nr_groups))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002374 return -1;
2375
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002376 ff->ph->env.nr_groups = nr_groups;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002377 if (!nr_groups) {
2378 pr_debug("group desc not available\n");
2379 return 0;
2380 }
2381
2382 desc = calloc(nr_groups, sizeof(*desc));
2383 if (!desc)
2384 return -1;
2385
2386 for (i = 0; i < nr_groups; i++) {
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002387 desc[i].name = do_read_string(ff);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002388 if (!desc[i].name)
2389 goto out_free;
2390
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002391 if (do_read_u32(ff, &desc[i].leader_idx))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002392 goto out_free;
2393
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002394 if (do_read_u32(ff, &desc[i].nr_members))
Namhyung Kima8bb5592013-01-22 18:09:31 +09002395 goto out_free;
Namhyung Kima8bb5592013-01-22 18:09:31 +09002396 }
2397
2398 /*
2399 * Rebuild group relationship based on the group_desc
2400 */
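	/*
	 * Example (illustrative): desc[0] = { .leader_idx = 0, .nr_members = 3 }
	 * makes the evsel with idx 0 its own leader and marks the following
	 * two evsels as its members via the 'nr' countdown below.
	 */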
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002401 session = container_of(ff->ph, struct perf_session, header);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002402 session->evlist->nr_groups = nr_groups;
2403
2404 i = nr = 0;
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002405 evlist__for_each_entry(session->evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002406 if (evsel->idx == (int) desc[i].leader_idx) {
2407 evsel->leader = evsel;
2408 /* {anon_group} is a dummy name */
Namhyung Kim210e8122013-11-18 11:20:43 +09002409 if (strcmp(desc[i].name, "{anon_group}")) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002410 evsel->group_name = desc[i].name;
Namhyung Kim210e8122013-11-18 11:20:43 +09002411 desc[i].name = NULL;
2412 }
Namhyung Kima8bb5592013-01-22 18:09:31 +09002413 evsel->nr_members = desc[i].nr_members;
2414
2415 if (i >= nr_groups || nr > 0) {
2416 pr_debug("invalid group desc\n");
2417 goto out_free;
2418 }
2419
2420 leader = evsel;
2421 nr = evsel->nr_members - 1;
2422 i++;
2423 } else if (nr) {
2424 /* This is a group member */
2425 evsel->leader = leader;
2426
2427 nr--;
2428 }
2429 }
2430
2431 if (i != nr_groups || nr != 0) {
2432 pr_debug("invalid group desc\n");
2433 goto out_free;
2434 }
2435
2436 ret = 0;
2437out_free:
Namhyung Kim50a27402013-11-18 11:20:44 +09002438 for (i = 0; i < nr_groups; i++)
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -03002439 zfree(&desc[i].name);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002440 free(desc);
2441
2442 return ret;
2443}
2444
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002445static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
Adrian Hunter99fa2982015-04-30 17:37:25 +03002446{
2447 struct perf_session *session;
2448 int err;
2449
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002450 session = container_of(ff->ph, struct perf_session, header);
Adrian Hunter99fa2982015-04-30 17:37:25 +03002451
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002452 err = auxtrace_index__process(ff->fd, ff->size, session,
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002453 ff->ph->needs_swap);
Adrian Hunter99fa2982015-04-30 17:37:25 +03002454 if (err < 0)
2455 pr_err("Failed to process auxtrace index\n");
2456 return err;
2457}
2458
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002459static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
Jiri Olsa720e98b2016-02-16 16:01:43 +01002460{
2461 struct cpu_cache_level *caches;
2462 u32 cnt, i, version;
2463
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002464 if (do_read_u32(ff, &version))
Jiri Olsa720e98b2016-02-16 16:01:43 +01002465 return -1;
2466
Jiri Olsa720e98b2016-02-16 16:01:43 +01002467 if (version != 1)
2468 return -1;
2469
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002470 if (do_read_u32(ff, &cnt))
Jiri Olsa720e98b2016-02-16 16:01:43 +01002471 return -1;
2472
Jiri Olsa720e98b2016-02-16 16:01:43 +01002473 caches = zalloc(sizeof(*caches) * cnt);
2474 if (!caches)
2475 return -1;
2476
2477 for (i = 0; i < cnt; i++) {
2478 struct cpu_cache_level c;
2479
2480 #define _R(v) \
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002481 if (do_read_u32(ff, &c.v))\
Jiri Olsa720e98b2016-02-16 16:01:43 +01002482 goto out_free_caches; \
Jiri Olsa720e98b2016-02-16 16:01:43 +01002483
2484 _R(level)
2485 _R(line_size)
2486 _R(sets)
2487 _R(ways)
2488 #undef _R
2489
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002490 #define _R(v) \
David Carrillo-Cisneros48e5fce2017-07-17 21:25:43 -07002491 c.v = do_read_string(ff); \
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002492 if (!c.v) \
Jiri Olsa720e98b2016-02-16 16:01:43 +01002493 goto out_free_caches;
2494
2495 _R(type)
2496 _R(size)
2497 _R(map)
2498 #undef _R
2499
2500 caches[i] = c;
2501 }
2502
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07002503 ff->ph->env.caches = caches;
2504 ff->ph->env.caches_cnt = cnt;
Jiri Olsa720e98b2016-02-16 16:01:43 +01002505 return 0;
2506out_free_caches:
2507 free(caches);
2508 return -1;
2509}
2510
Jin Yao60115182017-12-08 21:13:41 +08002511static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2512{
2513 struct perf_session *session;
2514 u64 first_sample_time, last_sample_time;
2515 int ret;
2516
2517 session = container_of(ff->ph, struct perf_session, header);
2518
2519 ret = do_read_u64(ff, &first_sample_time);
2520 if (ret)
2521 return -1;
2522
2523 ret = do_read_u64(ff, &last_sample_time);
2524 if (ret)
2525 return -1;
2526
2527 session->evlist->first_sample_time = first_sample_time;
2528 session->evlist->last_sample_time = last_sample_time;
2529 return 0;
2530}
2531
Jiri Olsae2091ce2018-03-07 16:50:08 +01002532static int process_mem_topology(struct feat_fd *ff,
2533 void *data __maybe_unused)
2534{
2535 struct memory_node *nodes;
2536 u64 version, i, nr, bsize;
2537 int ret = -1;
2538
2539 if (do_read_u64(ff, &version))
2540 return -1;
2541
2542 if (version != 1)
2543 return -1;
2544
2545 if (do_read_u64(ff, &bsize))
2546 return -1;
2547
2548 if (do_read_u64(ff, &nr))
2549 return -1;
2550
2551 nodes = zalloc(sizeof(*nodes) * nr);
2552 if (!nodes)
2553 return -1;
2554
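	/* per node: node id and size, followed by the bitmap of memory blocks */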
2555 for (i = 0; i < nr; i++) {
2556 struct memory_node n;
2557
2558 #define _R(v) \
2559 if (do_read_u64(ff, &n.v)) \
2560 goto out; \
2561
2562 _R(node)
2563 _R(size)
2564
2565 #undef _R
2566
2567 if (do_read_bitmap(ff, &n.set, &n.size))
2568 goto out;
2569
2570 nodes[i] = n;
2571 }
2572
2573 ff->ph->env.memory_bsize = bsize;
2574 ff->ph->env.memory_nodes = nodes;
2575 ff->ph->env.nr_memory_nodes = nr;
2576 ret = 0;
2577
2578out:
2579 if (ret)
2580 free(nodes);
2581 return ret;
2582}
2583
Alexey Budankovcf790512018-10-09 17:36:24 +03002584static int process_clockid(struct feat_fd *ff,
2585 void *data __maybe_unused)
2586{
2587 if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
2588 return -1;
2589
2590 return 0;
2591}
2592
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002593struct feature_ops {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002594 int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07002595 void (*print)(struct feat_fd *ff, FILE *fp);
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07002596 int (*process)(struct feat_fd *ff, void *data);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002597 const char *name;
2598 bool full_only;
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -07002599 bool synthesize;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002600};
2601
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -07002602#define FEAT_OPR(n, func, __full_only) \
2603 [HEADER_##n] = { \
2604 .name = __stringify(n), \
2605 .write = write_##func, \
2606 .print = print_##func, \
2607 .full_only = __full_only, \
2608 .process = process_##func, \
2609 .synthesize = true \
2610 }
2611
2612#define FEAT_OPN(n, func, __full_only) \
2613 [HEADER_##n] = { \
2614 .name = __stringify(n), \
2615 .write = write_##func, \
2616 .print = print_##func, \
2617 .full_only = __full_only, \
2618 .process = process_##func \
2619 }
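/* FEAT_OPR differs from FEAT_OPN only by also setting .synthesize = true. */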
Robert Richter8cdfa782011-12-07 10:02:56 +01002620
2621/* feature_ops not implemented: */
Stephane Eranian2eeaaa02012-05-15 13:28:13 +02002622#define print_tracing_data NULL
2623#define print_build_id NULL
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002624
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -07002625#define process_branch_stack NULL
2626#define process_stat NULL
2627
2628
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002629static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -07002630 FEAT_OPN(TRACING_DATA, tracing_data, false),
2631 FEAT_OPN(BUILD_ID, build_id, false),
2632 FEAT_OPR(HOSTNAME, hostname, false),
2633 FEAT_OPR(OSRELEASE, osrelease, false),
2634 FEAT_OPR(VERSION, version, false),
2635 FEAT_OPR(ARCH, arch, false),
2636 FEAT_OPR(NRCPUS, nrcpus, false),
2637 FEAT_OPR(CPUDESC, cpudesc, false),
2638 FEAT_OPR(CPUID, cpuid, false),
2639 FEAT_OPR(TOTAL_MEM, total_mem, false),
2640 FEAT_OPR(EVENT_DESC, event_desc, false),
2641 FEAT_OPR(CMDLINE, cmdline, false),
2642 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
2643 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
2644 FEAT_OPN(BRANCH_STACK, branch_stack, false),
2645 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
Jiri Olsae8fedff2018-07-12 15:52:02 +02002646 FEAT_OPR(GROUP_DESC, group_desc, false),
David Carrillo-Cisnerosa4d8c982017-07-17 21:25:46 -07002647 FEAT_OPN(AUXTRACE, auxtrace, false),
2648 FEAT_OPN(STAT, stat, false),
2649 FEAT_OPN(CACHE, cache, true),
Jin Yao60115182017-12-08 21:13:41 +08002650 FEAT_OPR(SAMPLE_TIME, sample_time, false),
Jiri Olsae2091ce2018-03-07 16:50:08 +01002651 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
Alexey Budankovcf790512018-10-09 17:36:24 +03002652 FEAT_OPR(CLOCKID, clockid, false)
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002653};
2654
2655struct header_print_data {
2656 FILE *fp;
2657 bool full; /* extended list of headers */
2658};
2659
2660static int perf_file_section__fprintf_info(struct perf_file_section *section,
2661 struct perf_header *ph,
2662 int feat, int fd, void *data)
2663{
2664 struct header_print_data *hd = data;
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07002665 struct feat_fd ff;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002666
2667 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2668 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2669 "%d, continuing...\n", section->offset, feat);
2670 return 0;
2671 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002672 if (feat >= HEADER_LAST_FEATURE) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002673 pr_warning("unknown feature %d\n", feat);
Robert Richterf7a8a132011-12-07 10:02:51 +01002674 return 0;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002675 }
2676 if (!feat_ops[feat].print)
2677 return 0;
2678
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07002679 ff = (struct feat_fd) {
2680 .fd = fd,
2681 .ph = ph,
2682 };
2683
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002684 if (!feat_ops[feat].full_only || hd->full)
David Carrillo-Cisneroscfc65422017-07-17 21:25:40 -07002685 feat_ops[feat].print(&ff, hd->fp);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002686 else
2687 fprintf(hd->fp, "# %s info available, use -I to display\n",
2688 feat_ops[feat].name);
2689
2690 return 0;
2691}
2692
2693int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2694{
2695 struct header_print_data hd;
2696 struct perf_header *header = &session->header;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002697 int fd = perf_data__fd(session->data);
Jiri Olsaf45f5612016-10-10 09:03:07 +02002698 struct stat st;
Arnaldo Carvalho de Melo0afcf292018-12-11 16:11:54 -03002699 time_t stctime;
Jiri Olsaaabae162016-10-10 09:35:50 +02002700 int ret, bit;
Jiri Olsaf45f5612016-10-10 09:03:07 +02002701
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002702 hd.fp = fp;
2703 hd.full = full;
2704
Jiri Olsaf45f5612016-10-10 09:03:07 +02002705 ret = fstat(fd, &st);
2706 if (ret == -1)
2707 return -1;
2708
Arnaldo Carvalho de Melo0afcf292018-12-11 16:11:54 -03002709 stctime = st.st_ctime;
2710 fprintf(fp, "# captured on : %s", ctime(&stctime));
Jiri Olsae971a5a2018-03-07 16:50:03 +01002711
2712 fprintf(fp, "# header version : %u\n", header->version);
2713 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
2714 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
2715 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
Jiri Olsaf45f5612016-10-10 09:03:07 +02002716
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002717 perf_header__process_sections(header, fd, &hd,
2718 perf_file_section__fprintf_info);
Jiri Olsaaabae162016-10-10 09:35:50 +02002719
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002720 if (session->data->is_pipe)
David Carrillo-Cisnerosc9d1c932017-04-10 13:14:32 -07002721 return 0;
2722
Jiri Olsaaabae162016-10-10 09:35:50 +02002723 fprintf(fp, "# missing features: ");
2724 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2725 if (bit)
2726 fprintf(fp, "%s ", feat_ops[bit].name);
2727 }
2728
2729 fprintf(fp, "\n");
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002730 return 0;
2731}
2732
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002733static int do_write_feat(struct feat_fd *ff, int type,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002734 struct perf_file_section **p,
2735 struct perf_evlist *evlist)
2736{
2737 int err;
2738 int ret = 0;
2739
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002740 if (perf_header__has_feat(ff->ph, type)) {
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002741 if (!feat_ops[type].write)
2742 return -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002743
David Carrillo-Cisneros0b3d3412017-07-17 21:25:45 -07002744 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
2745 return -1;
2746
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002747 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002748
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002749 err = feat_ops[type].write(ff, evlist);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002750 if (err < 0) {
Jiri Olsa0c2aff42016-10-10 09:38:02 +02002751 pr_debug("failed to write feature %s\n", feat_ops[type].name);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002752
2753 /* undo anything written */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002754 lseek(ff->fd, (*p)->offset, SEEK_SET);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002755
2756 return -1;
2757 }
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002758 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002759 (*p)++;
2760 }
2761 return ret;
2762}
2763
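/*
 * Write the payload of each set feature after the space reserved for the
 * section table, recording each offset/size, then seek back and write the
 * perf_file_section table itself.
 */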
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002764static int perf_header__adds_write(struct perf_header *header,
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002765 struct perf_evlist *evlist, int fd)
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002766{
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002767 int nr_sections;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002768 struct feat_fd ff;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002769 struct perf_file_section *feat_sec, *p;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002770 int sec_size;
2771 u64 sec_start;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002772 int feat;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002773 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002774
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002775 ff = (struct feat_fd){
2776 .fd = fd,
2777 .ph = header,
2778 };
2779
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002780 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002781 if (!nr_sections)
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002782 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002783
Paul Gortmaker91b98802013-01-30 20:05:49 -05002784 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002785 if (feat_sec == NULL)
2786 return -ENOMEM;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002787
2788 sec_size = sizeof(*feat_sec) * nr_sections;
2789
Jiri Olsa8d541e92013-07-17 19:49:44 +02002790 sec_start = header->feat_offset;
Xiao Guangrongf887f302010-02-04 16:46:42 +08002791 lseek(fd, sec_start + sec_size, SEEK_SET);
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002792
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002793 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002794 if (do_write_feat(&ff, feat, &p, evlist))
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002795 perf_header__clear_feat(header, feat);
2796 }
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002797
Xiao Guangrongf887f302010-02-04 16:46:42 +08002798 lseek(fd, sec_start, SEEK_SET);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002799 /*
2800 * may write more than needed due to dropped feature, but
Ingo Molnaradba1632018-12-03 11:22:00 +01002801	 * this is okay; the reader will skip the missing entries
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002802 */
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002803 err = do_write(&ff, feat_sec, sec_size);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002804 if (err < 0)
2805 pr_debug("failed to write feature section\n");
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002806 free(feat_sec);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002807 return err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002808}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002809
Tom Zanussi8dc58102010-04-01 23:59:15 -05002810int perf_header__write_pipe(int fd)
2811{
2812 struct perf_pipe_file_header f_header;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002813 struct feat_fd ff;
Tom Zanussi8dc58102010-04-01 23:59:15 -05002814 int err;
2815
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002816 ff = (struct feat_fd){ .fd = fd };
2817
Tom Zanussi8dc58102010-04-01 23:59:15 -05002818 f_header = (struct perf_pipe_file_header){
2819 .magic = PERF_MAGIC,
2820 .size = sizeof(f_header),
2821 };
2822
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002823 err = do_write(&ff, &f_header, sizeof(f_header));
Tom Zanussi8dc58102010-04-01 23:59:15 -05002824 if (err < 0) {
2825 pr_debug("failed to write perf pipe header\n");
2826 return err;
2827 }
2828
2829 return 0;
2830}
2831
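/*
 * Layout written below: the file header itself is filled in last; first
 * come the per-evsel id arrays, then the perf_file_attr table referencing
 * them, then the data section, and (when called at exit) the feature
 * sections written by perf_header__adds_write().
 */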
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002832int perf_session__write_header(struct perf_session *session,
2833 struct perf_evlist *evlist,
2834 int fd, bool at_exit)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002835{
2836 struct perf_file_header f_header;
2837 struct perf_file_attr f_attr;
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002838 struct perf_header *header = &session->header;
Jiri Olsa563aecb2013-06-05 13:35:06 +02002839 struct perf_evsel *evsel;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002840 struct feat_fd ff;
Jiri Olsa944d62b2013-07-17 19:49:43 +02002841 u64 attr_offset;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002842 int err;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002843
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002844 ff = (struct feat_fd){ .fd = fd};
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002845 lseek(fd, sizeof(f_header), SEEK_SET);
2846
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002847 evlist__for_each_entry(session->evlist, evsel) {
Robert Richter6606f872012-08-16 21:10:19 +02002848 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002849 err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002850 if (err < 0) {
2851 pr_debug("failed to write perf header\n");
2852 return err;
2853 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002854 }
2855
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002856 attr_offset = lseek(ff.fd, 0, SEEK_CUR);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002857
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002858 evlist__for_each_entry(evlist, evsel) {
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002859 f_attr = (struct perf_file_attr){
Robert Richter6606f872012-08-16 21:10:19 +02002860 .attr = evsel->attr,
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002861 .ids = {
Robert Richter6606f872012-08-16 21:10:19 +02002862 .offset = evsel->id_offset,
2863 .size = evsel->ids * sizeof(u64),
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002864 }
2865 };
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002866 err = do_write(&ff, &f_attr, sizeof(f_attr));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002867 if (err < 0) {
2868 pr_debug("failed to write perf header attribute\n");
2869 return err;
2870 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002871 }
2872
Adrian Hunterd645c442013-12-11 14:36:28 +02002873 if (!header->data_offset)
2874 header->data_offset = lseek(fd, 0, SEEK_CUR);
Jiri Olsa8d541e92013-07-17 19:49:44 +02002875 header->feat_offset = header->data_offset + header->data_size;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002876
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002877 if (at_exit) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002878 err = perf_header__adds_write(header, evlist, fd);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002879 if (err < 0)
2880 return err;
2881 }
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002882
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002883 f_header = (struct perf_file_header){
2884 .magic = PERF_MAGIC,
2885 .size = sizeof(f_header),
2886 .attr_size = sizeof(f_attr),
2887 .attrs = {
Jiri Olsa944d62b2013-07-17 19:49:43 +02002888 .offset = attr_offset,
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002889 .size = evlist->nr_entries * sizeof(f_attr),
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002890 },
2891 .data = {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002892 .offset = header->data_offset,
2893 .size = header->data_size,
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002894 },
Jiri Olsa44b3c572013-07-11 17:28:31 +02002895 /* event_types is ignored, store zeros */
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002896 };
2897
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002898 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002899
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002900 lseek(fd, 0, SEEK_SET);
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07002901 err = do_write(&ff, &f_header, sizeof(f_header));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002902 if (err < 0) {
2903 pr_debug("failed to write perf header\n");
2904 return err;
2905 }
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002906 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002907
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002908 return 0;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002909}
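/*
 * A rough sketch of the perf.data layout this function produces (the
 * offsets are whatever the lseek()/do_write() calls above end up at,
 * not fixed values):
 *
 *	offset 0		struct perf_file_header, rewritten last
 *	sizeof(f_header)	per-evsel id arrays (evsel->id_offset)
 *	attr_offset		struct perf_file_attr, one per evsel
 *	header->data_offset	sample data written by the caller
 *	header->feat_offset	feature sections (perf_header__adds_write)
 *
 * A caller like 'perf record' typically writes this header twice: once
 * up front with at_exit == false to lay out the file, and once at the
 * end with at_exit == true, when data_size and the feature sections are
 * finally known.
 */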
2910
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002911static int perf_header__getbuffer64(struct perf_header *header,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002912 int fd, void *buf, size_t size)
2913{
Arnaldo Carvalho de Melo1e7972c2011-01-03 16:50:55 -02002914 if (readn(fd, buf, size) <= 0)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002915 return -1;
2916
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002917 if (header->needs_swap)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002918 mem_bswap_64(buf, size);
2919
2920 return 0;
2921}
2922
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002923int perf_header__process_sections(struct perf_header *header, int fd,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002924 void *data,
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002925 int (*process)(struct perf_file_section *section,
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002926 struct perf_header *ph,
2927 int feat, int fd, void *data))
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002928{
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002929 struct perf_file_section *feat_sec, *sec;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002930 int nr_sections;
2931 int sec_size;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002932 int feat;
2933 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002934
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002935 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002936 if (!nr_sections)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002937 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002938
Paul Gortmaker91b98802013-01-30 20:05:49 -05002939 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002940 if (!feat_sec)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002941 return -1;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002942
2943 sec_size = sizeof(*feat_sec) * nr_sections;
2944
Jiri Olsa8d541e92013-07-17 19:49:44 +02002945 lseek(fd, header->feat_offset, SEEK_SET);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002946
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002947 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2948 if (err < 0)
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002949 goto out_free;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002950
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002951 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2952 err = process(sec++, header, feat, fd, data);
2953 if (err < 0)
2954 goto out_free;
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +01002955 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002956 err = 0;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002957out_free:
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002958 free(feat_sec);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002959 return err;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002960}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002961
Stephane Eranian114382a2012-02-09 23:21:08 +01002962static const int attr_file_abi_sizes[] = {
2963 [0] = PERF_ATTR_SIZE_VER0,
2964 [1] = PERF_ATTR_SIZE_VER1,
Jiri Olsa239cc472012-08-07 15:20:42 +02002965 [2] = PERF_ATTR_SIZE_VER2,
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002966 [3] = PERF_ATTR_SIZE_VER3,
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02002967 [4] = PERF_ATTR_SIZE_VER4,
Stephane Eranian114382a2012-02-09 23:21:08 +01002968 0,
2969};
2970
2971/*
2972 * In the legacy file format, the magic number does not encode endianness;
2973 * hdr_sz was used for that instead. But given that hdr_sz can vary based
2974 * on the ABI revision, we need to try all known sizes, in both byte
2975 * orders, to detect the endianness.
2976 */
2977static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2978{
2979 uint64_t ref_size, attr_size;
2980 int i;
2981
2982 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2983 ref_size = attr_file_abi_sizes[i]
2984 + sizeof(struct perf_file_section);
2985 if (hdr_sz != ref_size) {
2986 attr_size = bswap_64(hdr_sz);
2987 if (attr_size != ref_size)
2988 continue;
2989
2990 ph->needs_swap = true;
2991 }
2992 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2993 i,
2994 ph->needs_swap);
2995 return 0;
2996 }
2997 /* could not determine endianness */
2998 return -1;
2999}
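/*
 * An illustrative walk through the detection above, assuming
 * PERF_ATTR_SIZE_VER0 is 64 bytes and struct perf_file_section is two
 * u64s: an ABI0 file written on a same-endian host has
 *
 *	hdr_sz = 64 + 16 = 80 = 0x50
 *
 * while the same field read on an opposite-endian host shows up as
 * bswap_64(0x50) = 0x5000000000000000ULL; the loop notices that
 * swapping it back matches a known reference size and sets
 * ph->needs_swap = true.
 */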
3000
3001#define PERF_PIPE_HDR_VER0 16
3002
3003static const size_t attr_pipe_abi_sizes[] = {
3004 [0] = PERF_PIPE_HDR_VER0,
3005 0,
3006};
3007
3008/*
3009 * In the legacy pipe format, there is an implicit assumption that the
3010 * endianness of the host recording the samples and of the host parsing
3011 * them is the same. This is not always the case, given that the pipe output
3012 * can be redirected into a file and analyzed on a different machine with
3013 * possibly a different endianness and perf_event ABI revision in the perf tool itself.
3014 */
3015static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
3016{
3017 u64 attr_size;
3018 int i;
3019
3020 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
3021 if (hdr_sz != attr_pipe_abi_sizes[i]) {
3022 attr_size = bswap_64(hdr_sz);
3023 if (attr_size != hdr_sz)
3024 continue;
3025
3026 ph->needs_swap = true;
3027 }
3028 pr_debug("Pipe ABI%d perf.data file detected\n", i);
3029 return 0;
3030 }
3031 return -1;
3032}
3033
Feng Tange84ba4e2012-10-30 11:56:07 +08003034bool is_perf_magic(u64 magic)
3035{
3036 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3037 || magic == __perf_magic2
3038 || magic == __perf_magic2_sw)
3039 return true;
3040
3041 return false;
3042}
3043
Stephane Eranian114382a2012-02-09 23:21:08 +01003044static int check_magic_endian(u64 magic, uint64_t hdr_sz,
3045 bool is_pipe, struct perf_header *ph)
Stephane Eranian73323f52012-02-02 13:54:44 +01003046{
3047 int ret;
3048
3049 /* check for legacy format */
Stephane Eranian114382a2012-02-09 23:21:08 +01003050 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
Stephane Eranian73323f52012-02-02 13:54:44 +01003051 if (ret == 0) {
Jiri Olsa2a08c3e2013-07-17 19:49:47 +02003052 ph->version = PERF_HEADER_VERSION_1;
Stephane Eranian73323f52012-02-02 13:54:44 +01003053 pr_debug("legacy perf.data format\n");
Stephane Eranian114382a2012-02-09 23:21:08 +01003054 if (is_pipe)
3055 return try_all_pipe_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01003056
Stephane Eranian114382a2012-02-09 23:21:08 +01003057 return try_all_file_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01003058 }
Stephane Eranian114382a2012-02-09 23:21:08 +01003059 /*
3060 * the new magic number serves two purposes:
3061 * - unique number to identify actual perf.data files
3062 * - encode endianness of file
3063 */
Namhyung Kimf7913972015-01-29 17:06:45 +09003064 ph->version = PERF_HEADER_VERSION_2;
Stephane Eranian73323f52012-02-02 13:54:44 +01003065
Stephane Eranian114382a2012-02-09 23:21:08 +01003066 /* check magic number with one endianness */
3067 if (magic == __perf_magic2)
Stephane Eranian73323f52012-02-02 13:54:44 +01003068 return 0;
3069
Stephane Eranian114382a2012-02-09 23:21:08 +01003070 /* check magic number with opposite endianness */
3071 if (magic != __perf_magic2_sw)
Stephane Eranian73323f52012-02-02 13:54:44 +01003072 return -1;
3073
3074 ph->needs_swap = true;
3075
3076 return 0;
3077}
3078
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003079int perf_file_header__read(struct perf_file_header *header,
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003080 struct perf_header *ph, int fd)
3081{
Jiri Olsa727ebd52013-11-28 11:30:14 +01003082 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01003083
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003084 lseek(fd, 0, SEEK_SET);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003085
Stephane Eranian73323f52012-02-02 13:54:44 +01003086 ret = readn(fd, header, sizeof(*header));
3087 if (ret <= 0)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003088 return -1;
3089
Stephane Eranian114382a2012-02-09 23:21:08 +01003090 if (check_magic_endian(header->magic,
3091 header->attr_size, false, ph) < 0) {
3092 pr_debug("magic/endian check failed\n");
Stephane Eranian73323f52012-02-02 13:54:44 +01003093 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01003094 }
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003095
Stephane Eranian73323f52012-02-02 13:54:44 +01003096 if (ph->needs_swap) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003097 mem_bswap_64(header, offsetof(struct perf_file_header,
Stephane Eranian73323f52012-02-02 13:54:44 +01003098 adds_features));
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003099 }
3100
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003101 if (header->size != sizeof(*header)) {
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003102 /* Support the previous format */
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003103 if (header->size == offsetof(typeof(*header), adds_features))
3104 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003105 else
3106 return -1;
David Ahernd327fa42011-10-18 17:34:01 -06003107 } else if (ph->needs_swap) {
David Ahernd327fa42011-10-18 17:34:01 -06003108 /*
3109 * feature bitmap is declared as an array of unsigned longs --
3110 * not good since its size can differ between the host that
3111 * generated the data file and the host analyzing the file.
3112 *
3113 * We need to handle endianness, but we don't know the size of
3114 * the unsigned long where the file was generated. Take a best
3115 * guess at determining it: try a 64-bit swap first (i.e., file
3116 * created on a 64-bit host), and check if the hostname feature
3117 * bit is set (this feature bit is forced on as of fbe96f2).
3118 * If the bit is not set, undo the 64-bit swap and try a 32-bit
3119 * swap. If the hostname bit is still not set (e.g., older data
3120 * file), punt and fall back to the original behavior -- clearing
3121 * all feature bits and setting buildid (see the example after this function).
3122 */
David Ahern80c01202012-06-08 11:47:51 -03003123 mem_bswap_64(&header->adds_features,
3124 BITS_TO_U64(HEADER_FEAT_BITS));
David Ahernd327fa42011-10-18 17:34:01 -06003125
3126 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
David Ahern80c01202012-06-08 11:47:51 -03003127 /* unswap as u64 */
3128 mem_bswap_64(&header->adds_features,
3129 BITS_TO_U64(HEADER_FEAT_BITS));
3130
3131 /* unswap as u32 */
3132 mem_bswap_32(&header->adds_features,
3133 BITS_TO_U32(HEADER_FEAT_BITS));
David Ahernd327fa42011-10-18 17:34:01 -06003134 }
3135
3136 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
3137 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
3138 set_bit(HEADER_BUILD_ID, header->adds_features);
3139 }
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003140 }
3141
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003142 memcpy(&ph->adds_features, &header->adds_features,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003143 sizeof(ph->adds_features));
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003144
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003145 ph->data_offset = header->data.offset;
3146 ph->data_size = header->data.size;
Jiri Olsa8d541e92013-07-17 19:49:44 +02003147 ph->feat_offset = header->data.offset + header->data.size;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003148 return 0;
3149}
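/*
 * A byte-level example of the feature-bitmap heuristic above, assuming
 * all current feature bits (HEADER_HOSTNAME included) sit below bit 32:
 * a 32-bit big-endian writer stores those bits in the first four bytes
 * of adds_features.  A little-endian reader swapping in u64 units moves
 * them into the upper half of the first u64, so the HEADER_HOSTNAME
 * test still fails; redoing the swap in u32 units puts them back below
 * bit 32 and the test succeeds, which is how the two unsigned long
 * widths end up being told apart.
 */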
3150
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003151static int perf_file_section__process(struct perf_file_section *section,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003152 struct perf_header *ph,
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03003153 int feat, int fd, void *data)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003154{
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003155 struct feat_fd fdd = {
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07003156 .fd = fd,
3157 .ph = ph,
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003158 .size = section->size,
3159 .offset = section->offset,
David Carrillo-Cisneros1a222752017-07-17 21:25:41 -07003160 };
3161
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003162 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
Arnaldo Carvalho de Melo9486aa32011-01-22 20:37:02 -02003163 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003164 "%d, continuing...\n", section->offset, feat);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003165 return 0;
3166 }
3167
Robert Richterb1e5a9b2011-12-07 10:02:57 +01003168 if (feat >= HEADER_LAST_FEATURE) {
3169 pr_debug("unknown feature %d, continuing...\n", feat);
3170 return 0;
3171 }
3172
Robert Richterf1c67db2012-02-10 15:41:56 +01003173 if (!feat_ops[feat].process)
3174 return 0;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003175
David Carrillo-Cisneros62552452017-07-17 21:25:42 -07003176 return feat_ops[feat].process(&fdd, data);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02003177}
3178
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003179static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
Tom Zanussi454c4072010-05-01 01:41:20 -05003180 struct perf_header *ph, int fd,
3181 bool repipe)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003182{
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003183 struct feat_fd ff = {
3184 .fd = STDOUT_FILENO,
3185 .ph = ph,
3186 };
Jiri Olsa727ebd52013-11-28 11:30:14 +01003187 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01003188
3189 ret = readn(fd, header, sizeof(*header));
3190 if (ret <= 0)
3191 return -1;
3192
Stephane Eranian114382a2012-02-09 23:21:08 +01003193 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
3194 pr_debug("endian/magic failed\n");
Tom Zanussi8dc58102010-04-01 23:59:15 -05003195 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01003196 }
3197
3198 if (ph->needs_swap)
3199 header->size = bswap_64(header->size);
Tom Zanussi8dc58102010-04-01 23:59:15 -05003200
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003201 if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
Tom Zanussi454c4072010-05-01 01:41:20 -05003202 return -1;
3203
Tom Zanussi8dc58102010-04-01 23:59:15 -05003204 return 0;
3205}
3206
Jiri Olsad4339562013-07-17 19:49:41 +02003207static int perf_header__read_pipe(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05003208{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003209 struct perf_header *header = &session->header;
Tom Zanussi8dc58102010-04-01 23:59:15 -05003210 struct perf_pipe_file_header f_header;
3211
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003212 if (perf_file_header__read_pipe(&f_header, header,
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003213 perf_data__fd(session->data),
Tom Zanussi454c4072010-05-01 01:41:20 -05003214 session->repipe) < 0) {
Tom Zanussi8dc58102010-04-01 23:59:15 -05003215 pr_debug("incompatible file format\n");
3216 return -EINVAL;
3217 }
3218
Tom Zanussi8dc58102010-04-01 23:59:15 -05003219 return 0;
3220}
3221
Stephane Eranian69996df2012-02-09 23:21:06 +01003222static int read_attr(int fd, struct perf_header *ph,
3223 struct perf_file_attr *f_attr)
3224{
3225 struct perf_event_attr *attr = &f_attr->attr;
3226 size_t sz, left;
3227 size_t our_sz = sizeof(f_attr->attr);
Jiri Olsa727ebd52013-11-28 11:30:14 +01003228 ssize_t ret;
Stephane Eranian69996df2012-02-09 23:21:06 +01003229
3230 memset(f_attr, 0, sizeof(*f_attr));
3231
3232 /* read minimal guaranteed structure */
3233 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
3234 if (ret <= 0) {
3235 pr_debug("cannot read %d bytes of header attr\n",
3236 PERF_ATTR_SIZE_VER0);
3237 return -1;
3238 }
3239
3240 /* on file perf_event_attr size */
3241 sz = attr->size;
Stephane Eranian114382a2012-02-09 23:21:08 +01003242
Stephane Eranian69996df2012-02-09 23:21:06 +01003243 if (ph->needs_swap)
3244 sz = bswap_32(sz);
3245
3246 if (sz == 0) {
3247 /* assume ABI0 */
3248 sz = PERF_ATTR_SIZE_VER0;
3249 } else if (sz > our_sz) {
3250 pr_debug("file uses a more recent and unsupported ABI"
3251 " (%zu bytes extra)\n", sz - our_sz);
3252 return -1;
3253 }
3254 /* what we have not yet read and that we know about */
3255 left = sz - PERF_ATTR_SIZE_VER0;
3256 if (left) {
3257 void *ptr = attr;
3258 ptr += PERF_ATTR_SIZE_VER0;
3259
3260 ret = readn(fd, ptr, left);
3261 }
3262 /* read perf_file_section, ids are read in caller */
3263 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
3264
3265 return ret <= 0 ? -1 : 0;
3266}
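/*
 * How ABI skew plays out in read_attr(): a file recorded by an older
 * perf with attr->size == PERF_ATTR_SIZE_VER0 leaves the newer
 * perf_event_attr fields zeroed (from the memset above), so they read
 * as "feature not used"; a file whose on-disk attr is larger than the
 * running tool's perf_event_attr is rejected with the "more recent and
 * unsupported ABI" message rather than being silently truncated.
 */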
3267
Namhyung Kim831394b2012-09-06 11:10:46 +09003268static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
Tzvetomir Stoyanov (VMware)096177a2018-08-08 14:02:46 -04003269 struct tep_handle *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003270{
Tzvetomir Stoyanov97fbf3f2018-11-30 10:44:07 -05003271 struct tep_event *event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003272 char bf[128];
3273
Namhyung Kim831394b2012-09-06 11:10:46 +09003274 /* already prepared */
3275 if (evsel->tp_format)
3276 return 0;
3277
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09003278 if (pevent == NULL) {
3279 pr_debug("broken or missing trace data\n");
3280 return -1;
3281 }
3282
Tzvetomir Stoyanov (VMware)af85cd12018-08-08 14:02:50 -04003283 event = tep_find_event(pevent, evsel->attr.config);
Namhyung Kima7619ae2013-04-18 21:24:16 +09003284 if (event == NULL) {
3285 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003286 return -1;
Namhyung Kima7619ae2013-04-18 21:24:16 +09003287 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003288
Namhyung Kim831394b2012-09-06 11:10:46 +09003289 if (!evsel->name) {
3290 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
3291 evsel->name = strdup(bf);
3292 if (evsel->name == NULL)
3293 return -1;
3294 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003295
Arnaldo Carvalho de Melofcf65bf2012-08-07 09:58:03 -03003296 evsel->tp_format = event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003297 return 0;
3298}
3299
Namhyung Kim831394b2012-09-06 11:10:46 +09003300static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
Tzvetomir Stoyanov (VMware)096177a2018-08-08 14:02:46 -04003301 struct tep_handle *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003302{
3303 struct perf_evsel *pos;
3304
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03003305 evlist__for_each_entry(evlist, pos) {
Namhyung Kim831394b2012-09-06 11:10:46 +09003306 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
3307 perf_evsel__prepare_tracepoint_event(pos, pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003308 return -1;
3309 }
3310
3311 return 0;
3312}
3313
Jiri Olsad4339562013-07-17 19:49:41 +02003314int perf_session__read_header(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05003315{
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003316 struct perf_data *data = session->data;
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003317 struct perf_header *header = &session->header;
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003318 struct perf_file_header f_header;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003319 struct perf_file_attr f_attr;
3320 u64 f_id;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003321 int nr_attrs, nr_ids, i, j;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003322 int fd = perf_data__fd(data);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003323
Namhyung Kim334fe7a2013-03-11 16:43:12 +09003324 session->evlist = perf_evlist__new();
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003325 if (session->evlist == NULL)
3326 return -ENOMEM;
3327
Kan Liang2c071442015-08-28 05:48:05 -04003328 session->evlist->env = &header->env;
Arnaldo Carvalho de Melo4cde9982015-09-09 12:25:00 -03003329 session->machines.host.env = &header->env;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003330 if (perf_data__is_pipe(data))
Jiri Olsad4339562013-07-17 19:49:41 +02003331 return perf_header__read_pipe(session);
Tom Zanussi8dc58102010-04-01 23:59:15 -05003332
Stephane Eranian69996df2012-02-09 23:21:06 +01003333 if (perf_file_header__read(&f_header, header, fd) < 0)
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02003334 return -EINVAL;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003335
Namhyung Kimb314e5c2013-09-30 17:19:48 +09003336 /*
3337 * Sanity check that perf.data was written cleanly; data size is
3338 * initialized to 0 and updated only if the on_exit function is run.
3339 * If data size is still 0 then the file contains only partial
3340 * information. Just warn the user and process as much of it as we can.
3341 */
3342 if (f_header.data.size == 0) {
3343 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
3344 "Was the 'perf record' command properly terminated?\n",
Jiri Olsaeae8ad82017-01-23 22:25:41 +01003345 data->file.path);
Namhyung Kimb314e5c2013-09-30 17:19:48 +09003346 }
3347
Stephane Eranian69996df2012-02-09 23:21:06 +01003348 nr_attrs = f_header.attrs.size / f_header.attr_size;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003349 lseek(fd, f_header.attrs.offset, SEEK_SET);
3350
3351 for (i = 0; i < nr_attrs; i++) {
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003352 struct perf_evsel *evsel;
Peter Zijlstra1c222bc2009-08-06 20:57:41 +02003353 off_t tmp;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003354
Stephane Eranian69996df2012-02-09 23:21:06 +01003355 if (read_attr(fd, header, &f_attr) < 0)
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003356 goto out_errno;
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02003357
David Ahern1060ab82015-04-09 16:15:46 -04003358 if (header->needs_swap) {
3359 f_attr.ids.size = bswap_64(f_attr.ids.size);
3360 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
David Aherneda39132011-07-15 12:34:09 -06003361 perf_event__attr_swap(&f_attr.attr);
David Ahern1060ab82015-04-09 16:15:46 -04003362 }
David Aherneda39132011-07-15 12:34:09 -06003363
Peter Zijlstra1c222bc2009-08-06 20:57:41 +02003364 tmp = lseek(fd, 0, SEEK_CUR);
Arnaldo Carvalho de Meloef503832013-11-07 16:41:19 -03003365 evsel = perf_evsel__new(&f_attr.attr);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003366
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003367 if (evsel == NULL)
3368 goto out_delete_evlist;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03003369
3370 evsel->needs_swap = header->needs_swap;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003371 /*
3372 * Do it before so that if perf_evsel__alloc_id fails, this
3373 * entry gets purged too at perf_evlist__delete().
3374 */
3375 perf_evlist__add(session->evlist, evsel);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003376
3377 nr_ids = f_attr.ids.size / sizeof(u64);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003378 /*
3379 * We don't have the cpu and thread maps on the header, so
3380 * for allocating the perf_sample_id table we fake 1 cpu and
3381 * hattr->ids threads.
3382 */
3383 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
3384 goto out_delete_evlist;
3385
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003386 lseek(fd, f_attr.ids.offset, SEEK_SET);
3387
3388 for (j = 0; j < nr_ids; j++) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03003389 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003390 goto out_errno;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003391
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003392 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02003393 }
Arnaldo Carvalho de Melo11deb1f2009-11-17 01:18:09 -02003394
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003395 lseek(fd, tmp, SEEK_SET);
3396 }
3397
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01003398 perf_header__process_sections(header, fd, &session->tevent,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02003399 perf_file_section__process);
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +01003400
Namhyung Kim831394b2012-09-06 11:10:46 +09003401 if (perf_evlist__prepare_tracepoint_events(session->evlist,
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01003402 session->tevent.pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03003403 goto out_delete_evlist;
3404
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02003405 return 0;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02003406out_errno:
3407 return -errno;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003408
3409out_delete_evlist:
3410 perf_evlist__delete(session->evlist);
3411 session->evlist = NULL;
3412 return -ENOMEM;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02003413}
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02003414
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003415int perf_event__synthesize_attr(struct perf_tool *tool,
Robert Richterf4d83432012-08-16 21:10:17 +02003416 struct perf_event_attr *attr, u32 ids, u64 *id,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02003417 perf_event__handler_t process)
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02003418{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003419 union perf_event *ev;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003420 size_t size;
3421 int err;
3422
3423 size = sizeof(struct perf_event_attr);
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003424 size = PERF_ALIGN(size, sizeof(u64));
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003425 size += sizeof(struct perf_event_header);
3426 size += ids * sizeof(u64);
3427
3428 ev = malloc(size);
3429
Chris Samuelce47dc52010-11-13 13:35:06 +11003430 if (ev == NULL)
3431 return -ENOMEM;
3432
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003433 ev->attr.attr = *attr;
3434 memcpy(ev->attr.id, id, ids * sizeof(u64));
3435
3436 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
Robert Richterf4d83432012-08-16 21:10:17 +02003437 ev->attr.header.size = (u16)size;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003438
Robert Richterf4d83432012-08-16 21:10:17 +02003439 if (ev->attr.header.size == size)
3440 err = process(tool, ev, NULL, NULL);
3441 else
3442 err = -E2BIG;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003443
3444 free(ev);
3445
3446 return err;
3447}
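/*
 * Size arithmetic for the synthesized PERF_RECORD_HEADER_ATTR record
 * built above: sizeof(perf_event_attr) rounded up to a u64 boundary,
 * plus sizeof(perf_event_header), plus ids * sizeof(u64).  Since
 * perf_event_header.size is a u16, an evsel with roughly eight
 * thousand ids or more no longer fits, which is what the -E2BIG check
 * guards against.
 */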
3448
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003449int perf_event__synthesize_features(struct perf_tool *tool,
3450 struct perf_session *session,
3451 struct perf_evlist *evlist,
3452 perf_event__handler_t process)
3453{
3454 struct perf_header *header = &session->header;
3455 struct feat_fd ff;
3456 struct feature_event *fe;
3457 size_t sz, sz_hdr;
3458 int feat, ret;
3459
3460 sz_hdr = sizeof(fe->header);
3461 sz = sizeof(union perf_event);
3462 /* get a nice alignment */
3463 sz = PERF_ALIGN(sz, page_size);
3464
3465 memset(&ff, 0, sizeof(ff));
3466
3467 ff.buf = malloc(sz);
3468 if (!ff.buf)
3469 return -ENOMEM;
3470
3471 ff.size = sz - sz_hdr;
3472
3473 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3474 if (!feat_ops[feat].synthesize) {
3475 pr_debug("No record header feature for header :%d\n", feat);
3476 continue;
3477 }
3478
3479 ff.offset = sizeof(*fe);
3480
3481 ret = feat_ops[feat].write(&ff, evlist);
3482 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
3483 pr_debug("Error writing feature\n");
3484 continue;
3485 }
3486 /* ff.buf may have changed due to realloc in do_write() */
3487 fe = ff.buf;
3488 memset(fe, 0, sizeof(*fe));
3489
3490 fe->feat_id = feat;
3491 fe->header.type = PERF_RECORD_HEADER_FEATURE;
3492 fe->header.size = ff.offset;
3493
3494 ret = process(tool, ff.buf, NULL, NULL);
3495 if (ret) {
3496 free(ff.buf);
3497 return ret;
3498 }
3499 }
Jiri Olsa57b5de42018-03-14 10:22:05 +01003500
3501 /* Send HEADER_LAST_FEATURE mark. */
3502 fe = ff.buf;
3503 fe->feat_id = HEADER_LAST_FEATURE;
3504 fe->header.type = PERF_RECORD_HEADER_FEATURE;
3505 fe->header.size = sizeof(*fe);
3506
3507 ret = process(tool, ff.buf, NULL, NULL);
3508
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003509 free(ff.buf);
Jiri Olsa57b5de42018-03-14 10:22:05 +01003510 return ret;
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003511}
3512
Jiri Olsa89f16882018-09-13 14:54:03 +02003513int perf_event__process_feature(struct perf_session *session,
3514 union perf_event *event)
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003515{
Jiri Olsa89f16882018-09-13 14:54:03 +02003516 struct perf_tool *tool = session->tool;
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003517 struct feat_fd ff = { .fd = 0 };
3518 struct feature_event *fe = (struct feature_event *)event;
3519 int type = fe->header.type;
3520 u64 feat = fe->feat_id;
3521
3522 if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
3523 pr_warning("invalid record type %d in pipe-mode\n", type);
3524 return 0;
3525 }
Ravi Bangoria92ead7e2018-06-25 18:12:20 +05303526 if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
David Carrillo-Cisnerose9def1b2017-07-17 21:25:48 -07003527		pr_warning("invalid feature id %" PRIu64 " in pipe-mode\n", feat);
3528 return -1;
3529 }
3530
3531 if (!feat_ops[feat].process)
3532 return 0;
3533
3534 ff.buf = (void *)fe->data;
3535 ff.size = event->header.size - sizeof(event->header);
3536 ff.ph = &session->header;
3537
3538 if (feat_ops[feat].process(&ff, NULL))
3539 return -1;
3540
3541 if (!feat_ops[feat].print || !tool->show_feat_hdr)
3542 return 0;
3543
3544 if (!feat_ops[feat].full_only ||
3545 tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
3546 feat_ops[feat].print(&ff, stdout);
3547 } else {
3548 fprintf(stdout, "# %s info available, use -I to display\n",
3549 feat_ops[feat].name);
3550 }
3551
3552 return 0;
3553}
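/*
 * For reference, the PERF_RECORD_HEADER_FEATURE event handled above is
 * laid out as a perf_event_header, then the u64 feat_id, then the bytes
 * that feat_ops[feat].write() produced at synthesis time, so the same
 * ->process() callbacks used for on-disk feature sections can parse the
 * in-pipe copy straight from ff.buf.
 */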
3554
Jiri Olsaa6e52812015-10-25 15:51:37 +01003555static struct event_update_event *
3556event_update_event__new(size_t size, u64 type, u64 id)
3557{
3558 struct event_update_event *ev;
3559
3560 size += sizeof(*ev);
3561 size = PERF_ALIGN(size, sizeof(u64));
3562
3563 ev = zalloc(size);
3564 if (ev) {
3565 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3566 ev->header.size = (u16)size;
3567 ev->type = type;
3568 ev->id = id;
3569 }
3570 return ev;
3571}
3572
3573int
3574perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3575 struct perf_evsel *evsel,
3576 perf_event__handler_t process)
3577{
3578 struct event_update_event *ev;
3579 size_t size = strlen(evsel->unit);
3580 int err;
3581
3582 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3583 if (ev == NULL)
3584 return -ENOMEM;
3585
Arnaldo Carvalho de Melo75725882018-12-06 11:02:57 -03003586 strlcpy(ev->data, evsel->unit, size + 1);
Jiri Olsaa6e52812015-10-25 15:51:37 +01003587 err = process(tool, (union perf_event *)ev, NULL, NULL);
3588 free(ev);
3589 return err;
3590}
3591
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003592int
3593perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3594 struct perf_evsel *evsel,
3595 perf_event__handler_t process)
3596{
3597 struct event_update_event *ev;
3598 struct event_update_event_scale *ev_data;
3599 int err;
3600
3601 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3602 if (ev == NULL)
3603 return -ENOMEM;
3604
3605 ev_data = (struct event_update_event_scale *) ev->data;
3606 ev_data->scale = evsel->scale;
3607 err = process(tool, (union perf_event*) ev, NULL, NULL);
3608 free(ev);
3609 return err;
3610}
3611
Jiri Olsa802c9042015-10-25 15:51:39 +01003612int
3613perf_event__synthesize_event_update_name(struct perf_tool *tool,
3614 struct perf_evsel *evsel,
3615 perf_event__handler_t process)
3616{
3617 struct event_update_event *ev;
3618 size_t len = strlen(evsel->name);
3619 int err;
3620
3621 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3622 if (ev == NULL)
3623 return -ENOMEM;
3624
Arnaldo Carvalho de Melo5192bde2018-12-06 11:09:46 -03003625 strlcpy(ev->data, evsel->name, len + 1);
Jiri Olsa802c9042015-10-25 15:51:39 +01003626 err = process(tool, (union perf_event*) ev, NULL, NULL);
3627 free(ev);
3628 return err;
3629}
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003630
Jiri Olsa86ebb092015-10-25 15:51:40 +01003631int
3632perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3633 struct perf_evsel *evsel,
3634 perf_event__handler_t process)
3635{
3636 size_t size = sizeof(struct event_update_event);
3637 struct event_update_event *ev;
3638 int max, err;
3639 u16 type;
3640
3641 if (!evsel->own_cpus)
3642 return 0;
3643
3644 ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3645 if (!ev)
3646 return -ENOMEM;
3647
3648 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3649 ev->header.size = (u16)size;
3650 ev->type = PERF_EVENT_UPDATE__CPUS;
3651 ev->id = evsel->id[0];
3652
3653 cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3654 evsel->own_cpus,
3655 type, max);
3656
3657 err = process(tool, (union perf_event*) ev, NULL, NULL);
3658 free(ev);
3659 return err;
3660}
3661
Jiri Olsac853f932015-10-25 15:51:41 +01003662size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3663{
3664 struct event_update_event *ev = &event->event_update;
3665 struct event_update_event_scale *ev_scale;
3666 struct event_update_event_cpus *ev_cpus;
3667 struct cpu_map *map;
3668 size_t ret;
3669
3670 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);
3671
3672 switch (ev->type) {
3673 case PERF_EVENT_UPDATE__SCALE:
3674 ev_scale = (struct event_update_event_scale *) ev->data;
3675 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3676 break;
3677 case PERF_EVENT_UPDATE__UNIT:
3678 ret += fprintf(fp, "... unit: %s\n", ev->data);
3679 break;
3680 case PERF_EVENT_UPDATE__NAME:
3681 ret += fprintf(fp, "... name: %s\n", ev->data);
3682 break;
3683 case PERF_EVENT_UPDATE__CPUS:
3684 ev_cpus = (struct event_update_event_cpus *) ev->data;
3685 ret += fprintf(fp, "... ");
3686
3687 map = cpu_map__new_data(&ev_cpus->cpus);
3688 if (map)
3689 ret += cpu_map__fprintf(map, fp);
3690 else
3691 ret += fprintf(fp, "failed to get cpus\n");
3692 break;
3693 default:
3694 ret += fprintf(fp, "... unknown type\n");
3695 break;
3696 }
3697
3698 return ret;
3699}
Jiri Olsa86ebb092015-10-25 15:51:40 +01003700
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003701int perf_event__synthesize_attrs(struct perf_tool *tool,
Jiri Olsa318ec182018-08-30 08:32:15 +02003702 struct perf_evlist *evlist,
3703 perf_event__handler_t process)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003704{
Robert Richter6606f872012-08-16 21:10:19 +02003705 struct perf_evsel *evsel;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003706 int err = 0;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003707
Jiri Olsa318ec182018-08-30 08:32:15 +02003708 evlist__for_each_entry(evlist, evsel) {
Robert Richter6606f872012-08-16 21:10:19 +02003709 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3710 evsel->id, process);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003711 if (err) {
3712 pr_debug("failed to create perf header attribute\n");
3713 return err;
3714 }
3715 }
3716
3717 return err;
3718}
3719
Andi Kleenbfd8f722017-11-17 13:42:58 -08003720static bool has_unit(struct perf_evsel *counter)
3721{
3722 return counter->unit && *counter->unit;
3723}
3724
3725static bool has_scale(struct perf_evsel *counter)
3726{
3727 return counter->scale != 1;
3728}
3729
3730int perf_event__synthesize_extra_attr(struct perf_tool *tool,
3731 struct perf_evlist *evsel_list,
3732 perf_event__handler_t process,
3733 bool is_pipe)
3734{
3735 struct perf_evsel *counter;
3736 int err;
3737
3738 /*
3739 * Synthesize other event metadata not carried within
3740 * the attr event: unit, scale, name.
3741 */
3742 evlist__for_each_entry(evsel_list, counter) {
3743 if (!counter->supported)
3744 continue;
3745
3746 /*
3747 * Synthesize unit and scale only if they are defined.
3748 */
3749 if (has_unit(counter)) {
3750 err = perf_event__synthesize_event_update_unit(tool, counter, process);
3751 if (err < 0) {
3752 pr_err("Couldn't synthesize evsel unit.\n");
3753 return err;
3754 }
3755 }
3756
3757 if (has_scale(counter)) {
3758 err = perf_event__synthesize_event_update_scale(tool, counter, process);
3759 if (err < 0) {
3760 pr_err("Couldn't synthesize evsel scale.\n");
3761 return err;
3762 }
3763 }
3764
3765 if (counter->own_cpus) {
3766 err = perf_event__synthesize_event_update_cpus(tool, counter, process);
3767 if (err < 0) {
3768 pr_err("Couldn't synthesize evsel cpus.\n");
3769 return err;
3770 }
3771 }
3772
3773 /*
3774 * Name is needed only for pipe output;
3775 * perf.data already carries event names.
3776 */
3777 if (is_pipe) {
3778 err = perf_event__synthesize_event_update_name(tool, counter, process);
3779 if (err < 0) {
3780 pr_err("Couldn't synthesize evsel name.\n");
3781 return err;
3782 }
3783 }
3784 }
3785 return 0;
3786}
3787
Adrian Hunter47c3d102013-07-04 16:20:21 +03003788int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3789 union perf_event *event,
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003790 struct perf_evlist **pevlist)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003791{
Robert Richterf4d83432012-08-16 21:10:17 +02003792 u32 i, ids, n_ids;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003793 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003794 struct perf_evlist *evlist = *pevlist;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003795
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003796 if (evlist == NULL) {
Namhyung Kim334fe7a2013-03-11 16:43:12 +09003797 *pevlist = evlist = perf_evlist__new();
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003798 if (evlist == NULL)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003799 return -ENOMEM;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003800 }
3801
Arnaldo Carvalho de Meloef503832013-11-07 16:41:19 -03003802 evsel = perf_evsel__new(&event->attr.attr);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003803 if (evsel == NULL)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003804 return -ENOMEM;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003805
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003806 perf_evlist__add(evlist, evsel);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003807
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003808 ids = event->header.size;
3809 ids -= (void *)&event->attr.id - (void *)event;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003810 n_ids = ids / sizeof(u64);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003811 /*
3812 * We don't have the cpu and thread maps on the header, so
3813 * for allocating the perf_sample_id table we fake 1 cpu and
3814 * hattr->ids threads.
3815 */
3816 if (perf_evsel__alloc_id(evsel, 1, n_ids))
3817 return -ENOMEM;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003818
3819 for (i = 0; i < n_ids; i++) {
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003820 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003821 }
3822
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003823 return 0;
3824}
Tom Zanussicd19a032010-04-01 23:59:20 -05003825
Jiri Olsaffe777252015-10-25 15:51:36 +01003826int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3827 union perf_event *event,
3828 struct perf_evlist **pevlist)
3829{
3830 struct event_update_event *ev = &event->event_update;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003831 struct event_update_event_scale *ev_scale;
Jiri Olsa86ebb092015-10-25 15:51:40 +01003832 struct event_update_event_cpus *ev_cpus;
Jiri Olsaffe777252015-10-25 15:51:36 +01003833 struct perf_evlist *evlist;
3834 struct perf_evsel *evsel;
Jiri Olsa86ebb092015-10-25 15:51:40 +01003835 struct cpu_map *map;
Jiri Olsaffe777252015-10-25 15:51:36 +01003836
3837 if (!pevlist || *pevlist == NULL)
3838 return -EINVAL;
3839
3840 evlist = *pevlist;
3841
3842 evsel = perf_evlist__id2evsel(evlist, ev->id);
3843 if (evsel == NULL)
3844 return -EINVAL;
3845
Jiri Olsaa6e52812015-10-25 15:51:37 +01003846 switch (ev->type) {
3847 case PERF_EVENT_UPDATE__UNIT:
3848 evsel->unit = strdup(ev->data);
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003849 break;
Jiri Olsa802c9042015-10-25 15:51:39 +01003850 case PERF_EVENT_UPDATE__NAME:
3851 evsel->name = strdup(ev->data);
3852 break;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003853 case PERF_EVENT_UPDATE__SCALE:
3854 ev_scale = (struct event_update_event_scale *) ev->data;
3855 evsel->scale = ev_scale->scale;
Arnaldo Carvalho de Melo8434a2e2017-02-08 21:57:22 -03003856 break;
Jiri Olsa86ebb092015-10-25 15:51:40 +01003857 case PERF_EVENT_UPDATE__CPUS:
3858 ev_cpus = (struct event_update_event_cpus *) ev->data;
3859
3860 map = cpu_map__new_data(&ev_cpus->cpus);
3861 if (map)
3862 evsel->own_cpus = map;
3863 else
3864 pr_err("failed to get event_update cpus\n");
Jiri Olsaa6e52812015-10-25 15:51:37 +01003865 default:
3866 break;
3867 }
3868
Jiri Olsaffe777252015-10-25 15:51:36 +01003869 return 0;
3870}
3871
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003872int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003873 struct perf_evlist *evlist,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02003874 perf_event__handler_t process)
Tom Zanussi92155452010-04-01 23:59:21 -05003875{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003876 union perf_event ev;
Jiri Olsa29208e52011-10-20 15:59:43 +02003877 struct tracing_data *tdata;
Tom Zanussi92155452010-04-01 23:59:21 -05003878 ssize_t size = 0, aligned_size = 0, padding;
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003879 struct feat_fd ff;
Irina Tirdea1d037ca2012-09-11 01:15:03 +03003880 int err __maybe_unused = 0;
Tom Zanussi92155452010-04-01 23:59:21 -05003881
Jiri Olsa29208e52011-10-20 15:59:43 +02003882 /*
3883 * We are going to store the size of the data followed
3884 * by the data contents. Since the output fd is a pipe,
3885 * we cannot seek back to store the size of the data once
3886 * we know it. Instead we:
3887 *
3888 * - write the tracing data to the temp file
3889 * - get/write the data size to pipe
3890 * - write the tracing data from the temp file
3891 * to the pipe
3892 */
3893 tdata = tracing_data_get(&evlist->entries, fd, true);
3894 if (!tdata)
3895 return -1;
3896
Tom Zanussi92155452010-04-01 23:59:21 -05003897 memset(&ev, 0, sizeof(ev));
3898
3899 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
Jiri Olsa29208e52011-10-20 15:59:43 +02003900 size = tdata->size;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003901 aligned_size = PERF_ALIGN(size, sizeof(u64));
Tom Zanussi92155452010-04-01 23:59:21 -05003902 padding = aligned_size - size;
3903 ev.tracing_data.header.size = sizeof(ev.tracing_data);
3904 ev.tracing_data.size = aligned_size;
3905
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003906 process(tool, &ev, NULL, NULL);
Tom Zanussi92155452010-04-01 23:59:21 -05003907
Jiri Olsa29208e52011-10-20 15:59:43 +02003908 /*
3909 * The put function will copy all the tracing data
3910 * stored in temp file to the pipe.
3911 */
3912 tracing_data_put(tdata);
3913
David Carrillo-Cisnerosccebbeb2017-07-17 21:25:39 -07003914 ff = (struct feat_fd){ .fd = fd };
3915 if (write_padded(&ff, NULL, 0, padding))
David Carrillo-Cisneros2ff53652017-07-17 21:25:36 -07003916 return -1;
Tom Zanussi92155452010-04-01 23:59:21 -05003917
3918 return aligned_size;
3919}
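/*
 * What ends up on the pipe, in order: the PERF_RECORD_HEADER_TRACING_DATA
 * event carrying the aligned size, the raw tracing data copied from the
 * temp file by tracing_data_put(), and zero padding up to the next u64
 * boundary.  The aligned size returned here matches what
 * perf_event__process_tracing_data() below reads back (trace data plus
 * padding).
 */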
3920
Jiri Olsa89f16882018-09-13 14:54:03 +02003921int perf_event__process_tracing_data(struct perf_session *session,
3922 union perf_event *event)
Tom Zanussi92155452010-04-01 23:59:21 -05003923{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003924 ssize_t size_read, padding, size = event->tracing_data.size;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003925 int fd = perf_data__fd(session->data);
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003926 off_t offset = lseek(fd, 0, SEEK_CUR);
Tom Zanussi92155452010-04-01 23:59:21 -05003927 char buf[BUFSIZ];
3928
3929 /* setup for reading amidst mmap */
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003930 lseek(fd, offset + sizeof(struct tracing_data_event),
Tom Zanussi92155452010-04-01 23:59:21 -05003931 SEEK_SET);
3932
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01003933 size_read = trace_report(fd, &session->tevent,
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03003934 session->repipe);
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003935 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
Tom Zanussi92155452010-04-01 23:59:21 -05003936
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003937 if (readn(fd, buf, padding) < 0) {
Arnaldo Carvalho de Melo2caa48a2013-01-24 22:34:33 -03003938 pr_err("%s: reading input file", __func__);
3939 return -1;
3940 }
Tom Zanussi454c4072010-05-01 01:41:20 -05003941 if (session->repipe) {
3942 int retw = write(STDOUT_FILENO, buf, padding);
Arnaldo Carvalho de Melo2caa48a2013-01-24 22:34:33 -03003943 if (retw <= 0 || retw != padding) {
3944 pr_err("%s: repiping tracing data padding", __func__);
3945 return -1;
3946 }
Tom Zanussi454c4072010-05-01 01:41:20 -05003947 }
Tom Zanussi92155452010-04-01 23:59:21 -05003948
Arnaldo Carvalho de Melo2caa48a2013-01-24 22:34:33 -03003949 if (size_read + padding != size) {
3950 pr_err("%s: tracing data size mismatch", __func__);
3951 return -1;
3952 }
Tom Zanussi92155452010-04-01 23:59:21 -05003953
Namhyung Kim831394b2012-09-06 11:10:46 +09003954 perf_evlist__prepare_tracepoint_events(session->evlist,
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01003955 session->tevent.pevent);
Arnaldo Carvalho de Melo8b6ee4c2012-08-07 23:36:16 -03003956
Tom Zanussi92155452010-04-01 23:59:21 -05003957 return size_read + padding;
3958}
Tom Zanussic7929e42010-04-01 23:59:22 -05003959
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003960int perf_event__synthesize_build_id(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003961 struct dso *pos, u16 misc,
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003962 perf_event__handler_t process,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02003963 struct machine *machine)
Tom Zanussic7929e42010-04-01 23:59:22 -05003964{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003965 union perf_event ev;
Tom Zanussic7929e42010-04-01 23:59:22 -05003966 size_t len;
3967 int err = 0;
3968
3969 if (!pos->hit)
3970 return err;
3971
3972 memset(&ev, 0, sizeof(ev));
3973
3974 len = pos->long_name_len + 1;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003975 len = PERF_ALIGN(len, NAME_ALIGN);
Tom Zanussic7929e42010-04-01 23:59:22 -05003976 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3977 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3978 ev.build_id.header.misc = misc;
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -03003979 ev.build_id.pid = machine->pid;
Tom Zanussic7929e42010-04-01 23:59:22 -05003980 ev.build_id.header.size = sizeof(ev.build_id) + len;
3981 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3982
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003983 err = process(tool, &ev, NULL, machine);
Tom Zanussic7929e42010-04-01 23:59:22 -05003984
3985 return err;
3986}
3987
Jiri Olsa89f16882018-09-13 14:54:03 +02003988int perf_event__process_build_id(struct perf_session *session,
3989 union perf_event *event)
Tom Zanussic7929e42010-04-01 23:59:22 -05003990{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003991 __event_process_build_id(&event->build_id,
3992 event->build_id.filename,
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08003993 session);
Tom Zanussic7929e42010-04-01 23:59:22 -05003994 return 0;
3995}