1// SPDX-License-Identifier: GPL-2.0-only
2
3#include "util/debug.h"
4#include "util/dso.h"
5#include "util/event.h"
6#include "util/evlist.h"
7#include "util/machine.h"
8#include "util/map.h"
9#include "util/map_symbol.h"
10#include "util/branch.h"
11#include "util/memswap.h"
12#include "util/namespaces.h"
13#include "util/session.h"
14#include "util/stat.h"
15#include "util/symbol.h"
16#include "util/synthetic-events.h"
17#include "util/target.h"
18#include "util/time-utils.h"
19#include "util/cgroup.h"
20#include <linux/bitops.h>
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/zalloc.h>
24#include <linux/perf_event.h>
25#include <asm/bug.h>
26#include <perf/evsel.h>
27#include <internal/cpumap.h>
28#include <perf/cpumap.h>
29#include <internal/lib.h> // page_size
30#include <internal/threadmap.h>
31#include <perf/threadmap.h>
32#include <symbol/kallsyms.h>
33#include <dirent.h>
34#include <errno.h>
35#include <inttypes.h>
36#include <stdio.h>
37#include <string.h>
38#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
39#include <api/fs/fs.h>
40#include <sys/types.h>
41#include <sys/stat.h>
42#include <fcntl.h>
43#include <unistd.h>
44
45#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
46
47unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
48
49int perf_tool__process_synth_event(struct perf_tool *tool,
50 union perf_event *event,
51 struct machine *machine,
52 perf_event__handler_t process)
53{
54 struct perf_sample synth_sample = {
55 .pid = -1,
56 .tid = -1,
57 .time = -1,
58 .stream_id = -1,
59 .cpu = -1,
60 .period = 1,
61 .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
62 };
63
64 return process(tool, event, &synth_sample, machine);
65};
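/*
 * Synthesized events have no real sample attached, so a minimal one is
 * faked above: invalid pid/tid/time/stream_id/cpu, a period of 1 and only
 * the cpumode taken from the event header, which is enough for the normal
 * event processing path to accept the event.
 */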
66
67/*
68 * Assumes that the first 4095 bytes of /proc/pid/status contain
69 * the comm, tgid and ppid.
70 */
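/*
 * For reference, the fields parsed below appear in /proc/<pid>/status
 * roughly as (values illustrative only):
 *
 *	Name:	cat
 *	Tgid:	1234
 *	PPid:	1000
 */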
71static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
72 pid_t *tgid, pid_t *ppid)
73{
74 char bf[4096];
75 int fd;
76 size_t size = 0;
77 ssize_t n;
78 char *name, *tgids, *ppids;
79
80 *tgid = -1;
81 *ppid = -1;
82
83 snprintf(bf, sizeof(bf), "/proc/%d/status", pid);
84
85 fd = open(bf, O_RDONLY);
86 if (fd < 0) {
87 pr_debug("couldn't open %s\n", bf);
88 return -1;
89 }
90
91 n = read(fd, bf, sizeof(bf) - 1);
92 close(fd);
93 if (n <= 0) {
94 pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
95 pid);
96 return -1;
97 }
98 bf[n] = '\0';
99
100 name = strstr(bf, "Name:");
101 tgids = strstr(bf, "Tgid:");
102 ppids = strstr(bf, "PPid:");
103
104 if (name) {
105 char *nl;
106
107 name = skip_spaces(name + 5); /* strlen("Name:") */
108 nl = strchr(name, '\n');
109 if (nl)
110 *nl = '\0';
111
112 size = strlen(name);
113 if (size >= len)
114 size = len - 1;
115 memcpy(comm, name, size);
116 comm[size] = '\0';
117 } else {
118 pr_debug("Name: string not found for pid %d\n", pid);
119 }
120
121 if (tgids) {
122 tgids += 5; /* strlen("Tgid:") */
123 *tgid = atoi(tgids);
124 } else {
125 pr_debug("Tgid: string not found for pid %d\n", pid);
126 }
127
128 if (ppids) {
129 ppids += 5; /* strlen("PPid:") */
130 *ppid = atoi(ppids);
131 } else {
132 pr_debug("PPid: string not found for pid %d\n", pid);
133 }
134
135 return 0;
136}
137
138static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
139 struct machine *machine,
140 pid_t *tgid, pid_t *ppid)
141{
142 size_t size;
143
144 *ppid = -1;
145
146 memset(&event->comm, 0, sizeof(event->comm));
147
148 if (machine__is_host(machine)) {
149 if (perf_event__get_comm_ids(pid, event->comm.comm,
150 sizeof(event->comm.comm),
151 tgid, ppid) != 0) {
152 return -1;
153 }
154 } else {
155 *tgid = machine->pid;
156 }
157
158 if (*tgid < 0)
159 return -1;
160
161 event->comm.pid = *tgid;
162 event->comm.header.type = PERF_RECORD_COMM;
163
164 size = strlen(event->comm.comm) + 1;
165 size = PERF_ALIGN(size, sizeof(u64));
166 memset(event->comm.comm + size, 0, machine->id_hdr_size);
167 event->comm.header.size = (sizeof(event->comm) -
168 (sizeof(event->comm.comm) - size) +
169 machine->id_hdr_size);
170 event->comm.tid = pid;
171
172 return 0;
173}
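/*
 * Sizing example for perf_event__prepare_comm() above (illustrative): a
 * comm of "cat" needs strlen("cat") + 1 = 4 bytes, PERF_ALIGN() rounds that
 * up to 8, so header.size drops the unused tail of the fixed comm[] buffer
 * and then adds machine->id_hdr_size bytes for the trailing sample ID data.
 */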
174
175pid_t perf_event__synthesize_comm(struct perf_tool *tool,
176 union perf_event *event, pid_t pid,
177 perf_event__handler_t process,
178 struct machine *machine)
179{
180 pid_t tgid, ppid;
181
182 if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
183 return -1;
184
185 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
186 return -1;
187
188 return tgid;
189}
190
191static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
192 struct perf_ns_link_info *ns_link_info)
193{
194 struct stat64 st;
195 char proc_ns[128];
196
197 sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
198 if (stat64(proc_ns, &st) == 0) {
199 ns_link_info->dev = st.st_dev;
200 ns_link_info->ino = st.st_ino;
201 }
202}
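/*
 * Illustration: /proc/<pid>/ns/net is a symlink such as "net:[4026531992]";
 * the device/inode pair recorded above uniquely identifies the namespace,
 * and the inode number matches the bracketed value.
 */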
203
204int perf_event__synthesize_namespaces(struct perf_tool *tool,
205 union perf_event *event,
206 pid_t pid, pid_t tgid,
207 perf_event__handler_t process,
208 struct machine *machine)
209{
210 u32 idx;
211 struct perf_ns_link_info *ns_link_info;
212
213 if (!tool || !tool->namespace_events)
214 return 0;
215
216 memset(&event->namespaces, 0, (sizeof(event->namespaces) +
217 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
218 machine->id_hdr_size));
219
220 event->namespaces.pid = tgid;
221 event->namespaces.tid = pid;
222
223 event->namespaces.nr_namespaces = NR_NAMESPACES;
224
225 ns_link_info = event->namespaces.link_info;
226
227 for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
228 perf_event__get_ns_link_info(pid, perf_ns__name(idx),
229 &ns_link_info[idx]);
230
231 event->namespaces.header.type = PERF_RECORD_NAMESPACES;
232
233 event->namespaces.header.size = (sizeof(event->namespaces) +
234 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
235 machine->id_hdr_size);
236
237 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
238 return -1;
239
240 return 0;
241}
242
243static int perf_event__synthesize_fork(struct perf_tool *tool,
244 union perf_event *event,
245 pid_t pid, pid_t tgid, pid_t ppid,
246 perf_event__handler_t process,
247 struct machine *machine)
248{
249 memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
250
251 /*
252 * For the main thread, set the parent to the ppid from the status file.
253 * For other threads, set the parent pid to the main thread, i.e. assume
254 * the main thread spawns all threads in a process.
255 */
256 if (tgid == pid) {
257 event->fork.ppid = ppid;
258 event->fork.ptid = ppid;
259 } else {
260 event->fork.ppid = tgid;
261 event->fork.ptid = tgid;
262 }
263 event->fork.pid = tgid;
264 event->fork.tid = pid;
265 event->fork.header.type = PERF_RECORD_FORK;
266 event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
267
268 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
269
270 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
271 return -1;
272
273 return 0;
274}
275
276int perf_event__synthesize_mmap_events(struct perf_tool *tool,
277 union perf_event *event,
278 pid_t pid, pid_t tgid,
279 perf_event__handler_t process,
280 struct machine *machine,
281 bool mmap_data)
282{
283 FILE *fp;
284 unsigned long long t;
285 char bf[BUFSIZ];
286 bool truncation = false;
287 unsigned long long timeout = proc_map_timeout * 1000000ULL;
288 int rc = 0;
289 const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
290 int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
291
292 if (machine__is_default_guest(machine))
293 return 0;
294
295 snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
296 machine->root_dir, pid, pid);
297
298 fp = fopen(bf, "r");
299 if (fp == NULL) {
300 /*
301 * We raced with a task exiting - just return:
302 */
303 pr_debug("couldn't open %s\n", bf);
304 return -1;
305 }
306
307 event->header.type = PERF_RECORD_MMAP2;
308 t = rdclock();
309
310 while (1) {
311 char prot[5];
312 char execname[PATH_MAX];
313 char anonstr[] = "//anon";
314 unsigned int ino;
315 size_t size;
316 ssize_t n;
317
318 if (fgets(bf, sizeof(bf), fp) == NULL)
319 break;
320
321 if ((rdclock() - t) > timeout) {
322 pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
323 "You may want to increase "
324 "the time limit by --proc-map-timeout\n",
325 machine->root_dir, pid, pid);
326 truncation = true;
327 goto out;
328 }
329
330 /* ensure null termination since stack will be reused. */
331 strcpy(execname, "");
332
333 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
334 n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n",
335 &event->mmap2.start, &event->mmap2.len, prot,
336 &event->mmap2.pgoff, &event->mmap2.maj,
337 &event->mmap2.min,
338 &ino, execname);
339
340 /*
341 * Anon maps don't have the execname.
342 */
343 if (n < 7)
344 continue;
345
346 event->mmap2.ino = (u64)ino;
347 event->mmap2.ino_generation = 0;
348
349 /*
350 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
351 */
352 if (machine__is_host(machine))
353 event->header.misc = PERF_RECORD_MISC_USER;
354 else
355 event->header.misc = PERF_RECORD_MISC_GUEST_USER;
356
357 /* map protection and flags bits */
358 event->mmap2.prot = 0;
359 event->mmap2.flags = 0;
360 if (prot[0] == 'r')
361 event->mmap2.prot |= PROT_READ;
362 if (prot[1] == 'w')
363 event->mmap2.prot |= PROT_WRITE;
364 if (prot[2] == 'x')
365 event->mmap2.prot |= PROT_EXEC;
366
367 if (prot[3] == 's')
368 event->mmap2.flags |= MAP_SHARED;
369 else
370 event->mmap2.flags |= MAP_PRIVATE;
371
372 if (prot[2] != 'x') {
373 if (!mmap_data || prot[0] != 'r')
374 continue;
375
376 event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
377 }
378
379out:
380 if (truncation)
381 event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
382
383 if (!strcmp(execname, ""))
384 strcpy(execname, anonstr);
385
386 if (hugetlbfs_mnt_len &&
387 !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
388 strcpy(execname, anonstr);
389 event->mmap2.flags |= MAP_HUGETLB;
390 }
391
392 size = strlen(execname) + 1;
393 memcpy(event->mmap2.filename, execname, size);
394 size = PERF_ALIGN(size, sizeof(u64));
395 event->mmap2.len -= event->mmap2.start;
396 event->mmap2.header.size = (sizeof(event->mmap2) -
397 (sizeof(event->mmap2.filename) - size));
398 memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
399 event->mmap2.header.size += machine->id_hdr_size;
400 event->mmap2.pid = tgid;
401 event->mmap2.tid = pid;
402
403 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
404 rc = -1;
405 break;
406 }
407
408 if (truncation)
409 break;
410 }
411
412 fclose(fp);
413 return rc;
414}
415
416#ifdef HAVE_FILE_HANDLE
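/*
 * Synthesize one PERF_RECORD_CGROUP event for a cgroup directory: the
 * 64-bit cgroup id is read back from the file handle returned by
 * name_to_handle_at(), and the path is recorded relative to the cgroup
 * mount point, padded to a multiple of 8 bytes.
 */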
417static int perf_event__synthesize_cgroup(struct perf_tool *tool,
418 union perf_event *event,
419 char *path, size_t mount_len,
420 perf_event__handler_t process,
421 struct machine *machine)
422{
423 size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
424 size_t path_len = strlen(path) - mount_len + 1;
425 struct {
426 struct file_handle fh;
427 uint64_t cgroup_id;
428 } handle;
429 int mount_id;
430
431 while (path_len % sizeof(u64))
432 path[mount_len + path_len++] = '\0';
433
434 memset(&event->cgroup, 0, event_size);
435
436 event->cgroup.header.type = PERF_RECORD_CGROUP;
437 event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;
438
439 handle.fh.handle_bytes = sizeof(handle.cgroup_id);
440 if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
441 pr_debug("stat failed: %s\n", path);
442 return -1;
443 }
444
445 event->cgroup.id = handle.cgroup_id;
446 strncpy(event->cgroup.path, path + mount_len, path_len);
447 memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);
448
449 if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
450 pr_debug("process synth event failed\n");
451 return -1;
452 }
453
454 return 0;
455}
456
457static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
458 union perf_event *event,
459 char *path, size_t mount_len,
460 perf_event__handler_t process,
461 struct machine *machine)
462{
463 size_t pos = strlen(path);
464 DIR *d;
465 struct dirent *dent;
466 int ret = 0;
467
468 if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
469 process, machine) < 0)
470 return -1;
471
472 d = opendir(path);
473 if (d == NULL) {
474 pr_debug("failed to open directory: %s\n", path);
475 return -1;
476 }
477
478 while ((dent = readdir(d)) != NULL) {
479 if (dent->d_type != DT_DIR)
480 continue;
481 if (!strcmp(dent->d_name, ".") ||
482 !strcmp(dent->d_name, ".."))
483 continue;
484
485 /* any sane path should be less than PATH_MAX */
486 if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
487 continue;
488
489 if (path[pos - 1] != '/')
490 strcat(path, "/");
491 strcat(path, dent->d_name);
492
493 ret = perf_event__walk_cgroup_tree(tool, event, path,
494 mount_len, process, machine);
495 if (ret < 0)
496 break;
497
498 path[pos] = '\0';
499 }
500
501 closedir(d);
502 return ret;
503}
504
505int perf_event__synthesize_cgroups(struct perf_tool *tool,
506 perf_event__handler_t process,
507 struct machine *machine)
508{
509 union perf_event event;
510 char cgrp_root[PATH_MAX];
511 size_t mount_len; /* length of mount point in the path */
512
513 if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
514 pr_debug("cannot find cgroup mount point\n");
515 return -1;
516 }
517
518 mount_len = strlen(cgrp_root);
519 /* make sure the path starts with a slash (after mount point) */
520 strcat(cgrp_root, "/");
521
522 if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
523 process, machine) < 0)
524 return -1;
525
526 return 0;
527}
528#else
529int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
530 perf_event__handler_t process __maybe_unused,
531 struct machine *machine __maybe_unused)
532{
533 return -1;
534}
535#endif
536
537int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
538 struct machine *machine)
539{
540 int rc = 0;
541 struct map *pos;
542 struct maps *maps = machine__kernel_maps(machine);
543 union perf_event *event = zalloc((sizeof(event->mmap) +
544 machine->id_hdr_size));
545 if (event == NULL) {
546 pr_debug("Not enough memory synthesizing mmap event "
547 "for kernel modules\n");
548 return -1;
549 }
550
551 event->header.type = PERF_RECORD_MMAP;
552
553 /*
554 * kernel uses 0 for user space maps, see kernel/perf_event.c
555 * __perf_event_mmap
556 */
557 if (machine__is_host(machine))
558 event->header.misc = PERF_RECORD_MISC_KERNEL;
559 else
560 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
561
562 maps__for_each_entry(maps, pos) {
563 size_t size;
564
565 if (!__map__is_kmodule(pos))
566 continue;
567
568 size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
569 event->mmap.header.type = PERF_RECORD_MMAP;
570 event->mmap.header.size = (sizeof(event->mmap) -
571 (sizeof(event->mmap.filename) - size));
572 memset(event->mmap.filename + size, 0, machine->id_hdr_size);
573 event->mmap.header.size += machine->id_hdr_size;
574 event->mmap.start = pos->start;
575 event->mmap.len = pos->end - pos->start;
576 event->mmap.pid = machine->pid;
577
578 memcpy(event->mmap.filename, pos->dso->long_name,
579 pos->dso->long_name_len + 1);
580 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
581 rc = -1;
582 break;
583 }
584 }
585
586 free(event);
587 return rc;
588}
589
590static int __event__synthesize_thread(union perf_event *comm_event,
591 union perf_event *mmap_event,
592 union perf_event *fork_event,
593 union perf_event *namespaces_event,
594 pid_t pid, int full, perf_event__handler_t process,
595 struct perf_tool *tool, struct machine *machine, bool mmap_data)
596{
597 char filename[PATH_MAX];
598 DIR *tasks;
599 struct dirent *dirent;
600 pid_t tgid, ppid;
601 int rc = 0;
602
603 /* special case: only send one comm event using passed in pid */
604 if (!full) {
605 tgid = perf_event__synthesize_comm(tool, comm_event, pid,
606 process, machine);
607
608 if (tgid == -1)
609 return -1;
610
611 if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
612 tgid, process, machine) < 0)
613 return -1;
614
615 /*
616 * send mmap only for thread group leader
617 * see thread__init_maps()
618 */
619 if (pid == tgid &&
620 perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
621 process, machine, mmap_data))
622 return -1;
623
624 return 0;
625 }
626
627 if (machine__is_default_guest(machine))
628 return 0;
629
630 snprintf(filename, sizeof(filename), "%s/proc/%d/task",
631 machine->root_dir, pid);
632
633 tasks = opendir(filename);
634 if (tasks == NULL) {
635 pr_debug("couldn't open %s\n", filename);
636 return 0;
637 }
638
639 while ((dirent = readdir(tasks)) != NULL) {
640 char *end;
641 pid_t _pid;
642
643 _pid = strtol(dirent->d_name, &end, 10);
644 if (*end)
645 continue;
646
647 rc = -1;
648 if (perf_event__prepare_comm(comm_event, _pid, machine,
649 &tgid, &ppid) != 0)
650 break;
651
652 if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
653 ppid, process, machine) < 0)
654 break;
655
656 if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
657 tgid, process, machine) < 0)
658 break;
659
660 /*
661 * Send the prepared comm event
662 */
663 if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
664 break;
665
666 rc = 0;
667 if (_pid == pid) {
668 /* process the parent's maps too */
669 rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
670 process, machine, mmap_data);
671 if (rc)
672 break;
673 }
674 }
675
676 closedir(tasks);
677 return rc;
678}
679
680int perf_event__synthesize_thread_map(struct perf_tool *tool,
681 struct perf_thread_map *threads,
682 perf_event__handler_t process,
683 struct machine *machine,
684 bool mmap_data)
685{
686 union perf_event *comm_event, *mmap_event, *fork_event;
687 union perf_event *namespaces_event;
688 int err = -1, thread, j;
689
690 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
691 if (comm_event == NULL)
692 goto out;
693
694 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
695 if (mmap_event == NULL)
696 goto out_free_comm;
697
698 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
699 if (fork_event == NULL)
700 goto out_free_mmap;
701
702 namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
703 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
704 machine->id_hdr_size);
705 if (namespaces_event == NULL)
706 goto out_free_fork;
707
708 err = 0;
709 for (thread = 0; thread < threads->nr; ++thread) {
710 if (__event__synthesize_thread(comm_event, mmap_event,
711 fork_event, namespaces_event,
712 perf_thread_map__pid(threads, thread), 0,
713 process, tool, machine,
714 mmap_data)) {
715 err = -1;
716 break;
717 }
718
719 /*
720 * comm.pid is set to thread group id by
721 * perf_event__synthesize_comm
722 */
723 if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
724 bool need_leader = true;
725
726 /* is thread group leader in thread_map? */
727 for (j = 0; j < threads->nr; ++j) {
728 if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
729 need_leader = false;
730 break;
731 }
732 }
733
734 /* if not, generate events for it */
735 if (need_leader &&
736 __event__synthesize_thread(comm_event, mmap_event,
737 fork_event, namespaces_event,
738 comm_event->comm.pid, 0,
739 process, tool, machine,
740 mmap_data)) {
741 err = -1;
742 break;
743 }
744 }
745 }
746 free(namespaces_event);
747out_free_fork:
748 free(fork_event);
749out_free_mmap:
750 free(mmap_event);
751out_free_comm:
752 free(comm_event);
753out:
754 return err;
755}
756
757static int __perf_event__synthesize_threads(struct perf_tool *tool,
758 perf_event__handler_t process,
759 struct machine *machine,
760 bool mmap_data,
761 struct dirent **dirent,
762 int start,
763 int num)
764{
765 union perf_event *comm_event, *mmap_event, *fork_event;
766 union perf_event *namespaces_event;
767 int err = -1;
768 char *end;
769 pid_t pid;
770 int i;
771
772 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
773 if (comm_event == NULL)
774 goto out;
775
776 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
777 if (mmap_event == NULL)
778 goto out_free_comm;
779
780 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
781 if (fork_event == NULL)
782 goto out_free_mmap;
783
784 namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
785 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
786 machine->id_hdr_size);
787 if (namespaces_event == NULL)
788 goto out_free_fork;
789
790 for (i = start; i < start + num; i++) {
791 if (!isdigit(dirent[i]->d_name[0]))
792 continue;
793
794 pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
795 /* only interested in proper numerical dirents */
796 if (*end)
797 continue;
798 /*
799 * We may race with exiting thread, so don't stop just because
800 * one thread couldn't be synthesized.
801 */
802 __event__synthesize_thread(comm_event, mmap_event, fork_event,
803 namespaces_event, pid, 1, process,
804 tool, machine, mmap_data);
805 }
806 err = 0;
807
808 free(namespaces_event);
809out_free_fork:
810 free(fork_event);
811out_free_mmap:
812 free(mmap_event);
813out_free_comm:
814 free(comm_event);
815out:
816 return err;
817}
818
819struct synthesize_threads_arg {
820 struct perf_tool *tool;
821 perf_event__handler_t process;
822 struct machine *machine;
823 bool mmap_data;
824 struct dirent **dirent;
825 int num;
826 int start;
827};
828
829static void *synthesize_threads_worker(void *arg)
830{
831 struct synthesize_threads_arg *args = arg;
832
833 __perf_event__synthesize_threads(args->tool, args->process,
834 args->machine, args->mmap_data,
835 args->dirent,
836 args->start, args->num);
837 return NULL;
838}
839
840int perf_event__synthesize_threads(struct perf_tool *tool,
841 perf_event__handler_t process,
842 struct machine *machine,
843 bool mmap_data,
844 unsigned int nr_threads_synthesize)
845{
846 struct synthesize_threads_arg *args = NULL;
847 pthread_t *synthesize_threads = NULL;
848 char proc_path[PATH_MAX];
849 struct dirent **dirent;
850 int num_per_thread;
851 int m, n, i, j;
852 int thread_nr;
853 int base = 0;
854 int err = -1;
855
856
857 if (machine__is_default_guest(machine))
858 return 0;
859
860 snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
861 n = scandir(proc_path, &dirent, 0, alphasort);
862 if (n < 0)
863 return err;
864
865 if (nr_threads_synthesize == UINT_MAX)
866 thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
867 else
868 thread_nr = nr_threads_synthesize;
869
870 if (thread_nr <= 1) {
871 err = __perf_event__synthesize_threads(tool, process,
872 machine, mmap_data,
873 dirent, base, n);
874 goto free_dirent;
875 }
876 if (thread_nr > n)
877 thread_nr = n;
878
879 synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
880 if (synthesize_threads == NULL)
881 goto free_dirent;
882
883 args = calloc(sizeof(*args), thread_nr);
884 if (args == NULL)
885 goto free_threads;
886
887 num_per_thread = n / thread_nr;
888 m = n % thread_nr;
889 for (i = 0; i < thread_nr; i++) {
890 args[i].tool = tool;
891 args[i].process = process;
892 args[i].machine = machine;
893 args[i].mmap_data = mmap_data;
894 args[i].dirent = dirent;
895 }
896 for (i = 0; i < m; i++) {
897 args[i].num = num_per_thread + 1;
898 args[i].start = i * args[i].num;
899 }
900 if (i != 0)
901 base = args[i-1].start + args[i-1].num;
902 for (j = i; j < thread_nr; j++) {
903 args[j].num = num_per_thread;
904 args[j].start = base + (j - i) * args[i].num;
905 }
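	/*
	 * Example of the split above (illustrative): with n = 10 /proc entries
	 * and thread_nr = 4, num_per_thread = 2 and m = 2, so the workers get
	 * 3, 3, 2 and 2 entries starting at offsets 0, 3, 6 and 8.
	 */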
906
907 for (i = 0; i < thread_nr; i++) {
908 if (pthread_create(&synthesize_threads[i], NULL,
909 synthesize_threads_worker, &args[i]))
910 goto out_join;
911 }
912 err = 0;
913out_join:
914 for (i = 0; i < thread_nr; i++)
915 pthread_join(synthesize_threads[i], NULL);
916 free(args);
917free_threads:
918 free(synthesize_threads);
919free_dirent:
920 for (i = 0; i < n; i++)
921 zfree(&dirent[i]);
922 free(dirent);
923
924 return err;
925}
926
927int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
928 perf_event__handler_t process __maybe_unused,
929 struct machine *machine __maybe_unused)
930{
931 return 0;
932}
933
934static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
935 perf_event__handler_t process,
936 struct machine *machine)
937{
938 size_t size;
939 struct map *map = machine__kernel_map(machine);
940 struct kmap *kmap;
941 int err;
942 union perf_event *event;
943
944 if (map == NULL)
945 return -1;
946
947 kmap = map__kmap(map);
948 if (!kmap->ref_reloc_sym)
949 return -1;
950
951 /*
952 * We should get this from /sys/kernel/sections/.text, but until that is
953 * available use this, and once it is available keep this as a fallback
954 * for older kernels.
955 */
956 event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
957 if (event == NULL) {
958 pr_debug("Not enough memory synthesizing mmap event "
959 "for kernel modules\n");
960 return -1;
961 }
962
963 if (machine__is_host(machine)) {
964 /*
965 * kernel uses PERF_RECORD_MISC_USER for user space maps,
966 * see kernel/perf_event.c __perf_event_mmap
967 */
968 event->header.misc = PERF_RECORD_MISC_KERNEL;
969 } else {
970 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
971 }
972
973 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
974 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
975 size = PERF_ALIGN(size, sizeof(u64));
976 event->mmap.header.type = PERF_RECORD_MMAP;
977 event->mmap.header.size = (sizeof(event->mmap) -
978 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
979 event->mmap.pgoff = kmap->ref_reloc_sym->addr;
980 event->mmap.start = map->start;
981 event->mmap.len = map->end - event->mmap.start;
982 event->mmap.pid = machine->pid;
983
984 err = perf_tool__process_synth_event(tool, event, machine, process);
985 free(event);
986
987 return err;
988}
989
990int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
991 perf_event__handler_t process,
992 struct machine *machine)
993{
994 int err;
995
996 err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
997 if (err < 0)
998 return err;
999
1000 return perf_event__synthesize_extra_kmaps(tool, process, machine);
1001}
1002
1003int perf_event__synthesize_thread_map2(struct perf_tool *tool,
1004 struct perf_thread_map *threads,
1005 perf_event__handler_t process,
1006 struct machine *machine)
1007{
1008 union perf_event *event;
1009 int i, err, size;
1010
1011 size = sizeof(event->thread_map);
1012 size += threads->nr * sizeof(event->thread_map.entries[0]);
1013
1014 event = zalloc(size);
1015 if (!event)
1016 return -ENOMEM;
1017
1018 event->header.type = PERF_RECORD_THREAD_MAP;
1019 event->header.size = size;
1020 event->thread_map.nr = threads->nr;
1021
1022 for (i = 0; i < threads->nr; i++) {
1023 struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
1024 char *comm = perf_thread_map__comm(threads, i);
1025
1026 if (!comm)
1027 comm = (char *) "";
1028
1029 entry->pid = perf_thread_map__pid(threads, i);
1030 strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
1031 }
1032
1033 err = process(tool, event, NULL, machine);
1034
1035 free(event);
1036 return err;
1037}
1038
1039static void synthesize_cpus(struct cpu_map_entries *cpus,
1040 struct perf_cpu_map *map)
1041{
1042 int i;
1043
1044 cpus->nr = map->nr;
1045
1046 for (i = 0; i < map->nr; i++)
1047 cpus->cpu[i] = map->map[i];
1048}
1049
1050static void synthesize_mask(struct perf_record_record_cpu_map *mask,
1051 struct perf_cpu_map *map, int max)
1052{
1053 int i;
1054
1055 mask->nr = BITS_TO_LONGS(max);
1056 mask->long_size = sizeof(long);
1057
1058 for (i = 0; i < map->nr; i++)
1059 set_bit(map->map[i], mask->mask);
1060}
1061
1062static size_t cpus_size(struct perf_cpu_map *map)
1063{
1064 return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
1065}
1066
1067static size_t mask_size(struct perf_cpu_map *map, int *max)
1068{
1069 int i;
1070
1071 *max = 0;
1072
1073 for (i = 0; i < map->nr; i++) {
1074 /* bit position of the cpu is cpu number + 1 */
1075 int bit = map->map[i] + 1;
1076
1077 if (bit > *max)
1078 *max = bit;
1079 }
1080
1081 return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
1082}
1083
1084void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
1085{
1086 size_t size_cpus, size_mask;
1087 bool is_dummy = perf_cpu_map__empty(map);
1088
1089 /*
1090 * Both array and mask data have variable size based
1091 * on the number of cpus and their actual values.
1092 * The size of the 'struct perf_record_cpu_map_data' is:
1093 *
1094 * array = size of 'struct cpu_map_entries' +
1095 * number of cpus * sizeof(u64)
1096 *
1097 * mask = size of 'struct perf_record_record_cpu_map' +
1098 * maximum cpu bit converted to size of longs
1099 *
1100 * and finally + the size of 'struct perf_record_cpu_map_data'.
1101 */
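	/*
	 * Rough example (struct sizes omitted for brevity): for cpus 0-3 the
	 * array form needs four u16 entries while the mask form needs a whole
	 * unsigned long, so whichever total is smaller is chosen; a sparse map
	 * such as {0, 1023} would need BITS_TO_LONGS(1024) = 16 longs as a
	 * mask, making the array form the clear winner.
	 */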
1102 size_cpus = cpus_size(map);
1103 size_mask = mask_size(map, max);
1104
1105 if (is_dummy || (size_cpus < size_mask)) {
1106 *size += size_cpus;
1107 *type = PERF_CPU_MAP__CPUS;
1108 } else {
1109 *size += size_mask;
1110 *type = PERF_CPU_MAP__MASK;
1111 }
1112
1113 *size += sizeof(struct perf_record_cpu_map_data);
1114 *size = PERF_ALIGN(*size, sizeof(u64));
1115 return zalloc(*size);
1116}
1117
1118void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
1119 u16 type, int max)
1120{
1121 data->type = type;
1122
1123 switch (type) {
1124 case PERF_CPU_MAP__CPUS:
1125 synthesize_cpus((struct cpu_map_entries *) data->data, map);
1126 break;
1127 case PERF_CPU_MAP__MASK:
1128 synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
1129 default:
1130 break;
1131 };
1132}
1133
1134static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
1135{
1136 size_t size = sizeof(struct perf_record_cpu_map);
1137 struct perf_record_cpu_map *event;
1138 int max;
1139 u16 type;
1140
1141 event = cpu_map_data__alloc(map, &size, &type, &max);
1142 if (!event)
1143 return NULL;
1144
1145 event->header.type = PERF_RECORD_CPU_MAP;
1146 event->header.size = size;
1147 event->data.type = type;
1148
1149 cpu_map_data__synthesize(&event->data, map, type, max);
1150 return event;
1151}
1152
1153int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1154 struct perf_cpu_map *map,
1155 perf_event__handler_t process,
1156 struct machine *machine)
1157{
1158 struct perf_record_cpu_map *event;
1159 int err;
1160
1161 event = cpu_map_event__new(map);
1162 if (!event)
1163 return -ENOMEM;
1164
1165 err = process(tool, (union perf_event *) event, NULL, machine);
1166
1167 free(event);
1168 return err;
1169}
1170
1171int perf_event__synthesize_stat_config(struct perf_tool *tool,
1172 struct perf_stat_config *config,
1173 perf_event__handler_t process,
1174 struct machine *machine)
1175{
1176 struct perf_record_stat_config *event;
1177 int size, i = 0, err;
1178
1179 size = sizeof(*event);
1180 size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1181
1182 event = zalloc(size);
1183 if (!event)
1184 return -ENOMEM;
1185
1186 event->header.type = PERF_RECORD_STAT_CONFIG;
1187 event->header.size = size;
1188 event->nr = PERF_STAT_CONFIG_TERM__MAX;
1189
1190#define ADD(__term, __val) \
1191 event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \
1192 event->data[i].val = __val; \
1193 i++;
1194
1195 ADD(AGGR_MODE, config->aggr_mode)
1196 ADD(INTERVAL, config->interval)
1197 ADD(SCALE, config->scale)
1198
1199 WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1200 "stat config terms unbalanced\n");
1201#undef ADD
1202
1203 err = process(tool, (union perf_event *) event, NULL, machine);
1204
1205 free(event);
1206 return err;
1207}
1208
1209int perf_event__synthesize_stat(struct perf_tool *tool,
1210 u32 cpu, u32 thread, u64 id,
1211 struct perf_counts_values *count,
1212 perf_event__handler_t process,
1213 struct machine *machine)
1214{
1215 struct perf_record_stat event;
1216
1217 event.header.type = PERF_RECORD_STAT;
1218 event.header.size = sizeof(event);
1219 event.header.misc = 0;
1220
1221 event.id = id;
1222 event.cpu = cpu;
1223 event.thread = thread;
1224 event.val = count->val;
1225 event.ena = count->ena;
1226 event.run = count->run;
1227
1228 return process(tool, (union perf_event *) &event, NULL, machine);
1229}
1230
1231int perf_event__synthesize_stat_round(struct perf_tool *tool,
1232 u64 evtime, u64 type,
1233 perf_event__handler_t process,
1234 struct machine *machine)
1235{
1236 struct perf_record_stat_round event;
1237
1238 event.header.type = PERF_RECORD_STAT_ROUND;
1239 event.header.size = sizeof(event);
1240 event.header.misc = 0;
1241
1242 event.time = evtime;
1243 event.type = type;
1244
1245 return process(tool, (union perf_event *) &event, NULL, machine);
1246}
1247
1248size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
1249{
1250 size_t sz, result = sizeof(struct perf_record_sample);
1251
1252 if (type & PERF_SAMPLE_IDENTIFIER)
1253 result += sizeof(u64);
1254
1255 if (type & PERF_SAMPLE_IP)
1256 result += sizeof(u64);
1257
1258 if (type & PERF_SAMPLE_TID)
1259 result += sizeof(u64);
1260
1261 if (type & PERF_SAMPLE_TIME)
1262 result += sizeof(u64);
1263
1264 if (type & PERF_SAMPLE_ADDR)
1265 result += sizeof(u64);
1266
1267 if (type & PERF_SAMPLE_ID)
1268 result += sizeof(u64);
1269
1270 if (type & PERF_SAMPLE_STREAM_ID)
1271 result += sizeof(u64);
1272
1273 if (type & PERF_SAMPLE_CPU)
1274 result += sizeof(u64);
1275
1276 if (type & PERF_SAMPLE_PERIOD)
1277 result += sizeof(u64);
1278
1279 if (type & PERF_SAMPLE_READ) {
1280 result += sizeof(u64);
1281 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1282 result += sizeof(u64);
1283 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1284 result += sizeof(u64);
1285 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1286 if (read_format & PERF_FORMAT_GROUP) {
1287 sz = sample->read.group.nr *
1288 sizeof(struct sample_read_value);
1289 result += sz;
1290 } else {
1291 result += sizeof(u64);
1292 }
1293 }
1294
1295 if (type & PERF_SAMPLE_CALLCHAIN) {
1296 sz = (sample->callchain->nr + 1) * sizeof(u64);
1297 result += sz;
1298 }
1299
1300 if (type & PERF_SAMPLE_RAW) {
1301 result += sizeof(u32);
1302 result += sample->raw_size;
1303 }
1304
1305 if (type & PERF_SAMPLE_BRANCH_STACK) {
1306 sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1307 /* nr, hw_idx */
1308 sz += 2 * sizeof(u64);
1309 result += sz;
1310 }
1311
1312 if (type & PERF_SAMPLE_REGS_USER) {
1313 if (sample->user_regs.abi) {
1314 result += sizeof(u64);
1315 sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1316 result += sz;
1317 } else {
1318 result += sizeof(u64);
1319 }
1320 }
1321
1322 if (type & PERF_SAMPLE_STACK_USER) {
1323 sz = sample->user_stack.size;
1324 result += sizeof(u64);
1325 if (sz) {
1326 result += sz;
1327 result += sizeof(u64);
1328 }
1329 }
1330
1331 if (type & PERF_SAMPLE_WEIGHT)
1332 result += sizeof(u64);
1333
1334 if (type & PERF_SAMPLE_DATA_SRC)
1335 result += sizeof(u64);
1336
1337 if (type & PERF_SAMPLE_TRANSACTION)
1338 result += sizeof(u64);
1339
1340 if (type & PERF_SAMPLE_REGS_INTR) {
1341 if (sample->intr_regs.abi) {
1342 result += sizeof(u64);
1343 sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1344 result += sz;
1345 } else {
1346 result += sizeof(u64);
1347 }
1348 }
1349
1350 if (type & PERF_SAMPLE_PHYS_ADDR)
1351 result += sizeof(u64);
1352
1353 if (type & PERF_SAMPLE_CGROUP)
1354 result += sizeof(u64);
1355
1356 if (type & PERF_SAMPLE_AUX) {
1357 result += sizeof(u64);
1358 result += sample->aux_sample.size;
1359 }
1360
1361 return result;
1362}
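/*
 * Example (illustrative): for type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_CPU the function above returns
 * sizeof(struct perf_record_sample) + 4 * sizeof(u64), matching the four
 * u64 slots that perf_event__synthesize_sample() writes for those bits.
 */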
1363
1364int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1365 const struct perf_sample *sample)
1366{
1367 __u64 *array;
1368 size_t sz;
1369 /*
1370 * used for cross-endian analysis. See git commit 65014ab3
1371 * for why this goofiness is needed.
1372 */
1373 union u64_swap u;
1374
1375 array = event->sample.array;
1376
1377 if (type & PERF_SAMPLE_IDENTIFIER) {
1378 *array = sample->id;
1379 array++;
1380 }
1381
1382 if (type & PERF_SAMPLE_IP) {
1383 *array = sample->ip;
1384 array++;
1385 }
1386
1387 if (type & PERF_SAMPLE_TID) {
1388 u.val32[0] = sample->pid;
1389 u.val32[1] = sample->tid;
1390 *array = u.val64;
1391 array++;
1392 }
1393
1394 if (type & PERF_SAMPLE_TIME) {
1395 *array = sample->time;
1396 array++;
1397 }
1398
1399 if (type & PERF_SAMPLE_ADDR) {
1400 *array = sample->addr;
1401 array++;
1402 }
1403
1404 if (type & PERF_SAMPLE_ID) {
1405 *array = sample->id;
1406 array++;
1407 }
1408
1409 if (type & PERF_SAMPLE_STREAM_ID) {
1410 *array = sample->stream_id;
1411 array++;
1412 }
1413
1414 if (type & PERF_SAMPLE_CPU) {
1415 u.val32[0] = sample->cpu;
1416 u.val32[1] = 0;
1417 *array = u.val64;
1418 array++;
1419 }
1420
1421 if (type & PERF_SAMPLE_PERIOD) {
1422 *array = sample->period;
1423 array++;
1424 }
1425
1426 if (type & PERF_SAMPLE_READ) {
1427 if (read_format & PERF_FORMAT_GROUP)
1428 *array = sample->read.group.nr;
1429 else
1430 *array = sample->read.one.value;
1431 array++;
1432
1433 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1434 *array = sample->read.time_enabled;
1435 array++;
1436 }
1437
1438 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1439 *array = sample->read.time_running;
1440 array++;
1441 }
1442
1443 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1444 if (read_format & PERF_FORMAT_GROUP) {
1445 sz = sample->read.group.nr *
1446 sizeof(struct sample_read_value);
1447 memcpy(array, sample->read.group.values, sz);
1448 array = (void *)array + sz;
1449 } else {
1450 *array = sample->read.one.id;
1451 array++;
1452 }
1453 }
1454
1455 if (type & PERF_SAMPLE_CALLCHAIN) {
1456 sz = (sample->callchain->nr + 1) * sizeof(u64);
1457 memcpy(array, sample->callchain, sz);
1458 array = (void *)array + sz;
1459 }
1460
1461 if (type & PERF_SAMPLE_RAW) {
1462 u.val32[0] = sample->raw_size;
1463 *array = u.val64;
1464 array = (void *)array + sizeof(u32);
1465
1466 memcpy(array, sample->raw_data, sample->raw_size);
1467 array = (void *)array + sample->raw_size;
1468 }
1469
1470 if (type & PERF_SAMPLE_BRANCH_STACK) {
1471 sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1472 /* nr, hw_idx */
1473 sz += 2 * sizeof(u64);
1474 memcpy(array, sample->branch_stack, sz);
1475 array = (void *)array + sz;
1476 }
1477
1478 if (type & PERF_SAMPLE_REGS_USER) {
1479 if (sample->user_regs.abi) {
1480 *array++ = sample->user_regs.abi;
1481 sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1482 memcpy(array, sample->user_regs.regs, sz);
1483 array = (void *)array + sz;
1484 } else {
1485 *array++ = 0;
1486 }
1487 }
1488
1489 if (type & PERF_SAMPLE_STACK_USER) {
1490 sz = sample->user_stack.size;
1491 *array++ = sz;
1492 if (sz) {
1493 memcpy(array, sample->user_stack.data, sz);
1494 array = (void *)array + sz;
1495 *array++ = sz;
1496 }
1497 }
1498
1499 if (type & PERF_SAMPLE_WEIGHT) {
1500 *array = sample->weight;
1501 array++;
1502 }
1503
1504 if (type & PERF_SAMPLE_DATA_SRC) {
1505 *array = sample->data_src;
1506 array++;
1507 }
1508
1509 if (type & PERF_SAMPLE_TRANSACTION) {
1510 *array = sample->transaction;
1511 array++;
1512 }
1513
1514 if (type & PERF_SAMPLE_REGS_INTR) {
1515 if (sample->intr_regs.abi) {
1516 *array++ = sample->intr_regs.abi;
1517 sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1518 memcpy(array, sample->intr_regs.regs, sz);
1519 array = (void *)array + sz;
1520 } else {
1521 *array++ = 0;
1522 }
1523 }
1524
1525 if (type & PERF_SAMPLE_PHYS_ADDR) {
1526 *array = sample->phys_addr;
1527 array++;
1528 }
1529
1530 if (type & PERF_SAMPLE_CGROUP) {
1531 *array = sample->cgroup;
1532 array++;
1533 }
1534
1535 if (type & PERF_SAMPLE_AUX) {
1536 sz = sample->aux_sample.size;
1537 *array++ = sz;
1538 memcpy(array, sample->aux_sample.data, sz);
1539 array = (void *)array + sz;
1540 }
1541
1542 return 0;
1543}
1544
1545int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1546 struct evlist *evlist, struct machine *machine)
1547{
1548 union perf_event *ev;
1549 struct evsel *evsel;
1550 size_t nr = 0, i = 0, sz, max_nr, n;
1551 int err;
1552
1553 pr_debug2("Synthesizing id index\n");
1554
1555 max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
1556 sizeof(struct id_index_entry);
1557
1558 evlist__for_each_entry(evlist, evsel)
1559 nr += evsel->core.ids;
1560
1561 n = nr > max_nr ? max_nr : nr;
1562 sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
1563 ev = zalloc(sz);
1564 if (!ev)
1565 return -ENOMEM;
1566
1567 ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1568 ev->id_index.header.size = sz;
1569 ev->id_index.nr = n;
1570
1571 evlist__for_each_entry(evlist, evsel) {
1572 u32 j;
1573
1574 for (j = 0; j < evsel->core.ids; j++) {
1575 struct id_index_entry *e;
1576 struct perf_sample_id *sid;
1577
1578 if (i >= n) {
1579 err = process(tool, ev, NULL, machine);
1580 if (err)
1581 goto out_err;
1582 nr -= n;
1583 i = 0;
1584 }
1585
1586 e = &ev->id_index.entries[i++];
1587
1588 e->id = evsel->core.id[j];
1589
1590 sid = perf_evlist__id2sid(evlist, e->id);
1591 if (!sid) {
1592 free(ev);
1593 return -ENOENT;
1594 }
1595
1596 e->idx = sid->idx;
1597 e->cpu = sid->cpu;
1598 e->tid = sid->tid;
1599 }
1600 }
1601
1602 sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
1603 ev->id_index.header.size = sz;
1604 ev->id_index.nr = nr;
1605
1606 err = process(tool, ev, NULL, machine);
1607out_err:
1608 free(ev);
1609
1610 return err;
1611}
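/*
 * Note on perf_event__synthesize_id_index() above: header.size is a u16,
 * so the index is emitted in chunks of at most max_nr entries, with the
 * final (possibly partial) chunk sent after the loop with its real count.
 */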
1612
1613int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1614 struct target *target, struct perf_thread_map *threads,
1615 perf_event__handler_t process, bool data_mmap,
1616 unsigned int nr_threads_synthesize)
1617{
1618 if (target__has_task(target))
1619 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
1620 else if (target__has_cpu(target))
1621 return perf_event__synthesize_threads(tool, process,
1622 machine, data_mmap,
1623 nr_threads_synthesize);
1624 /* command specified */
1625 return 0;
1626}
1627
1628int machine__synthesize_threads(struct machine *machine, struct target *target,
1629 struct perf_thread_map *threads, bool data_mmap,
1630 unsigned int nr_threads_synthesize)
1631{
1632 return __machine__synthesize_threads(machine, NULL, target, threads,
1633 perf_event__process, data_mmap,
1634 nr_threads_synthesize);
1635}
1636
1637static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1638{
1639 struct perf_record_event_update *ev;
1640
1641 size += sizeof(*ev);
1642 size = PERF_ALIGN(size, sizeof(u64));
1643
1644 ev = zalloc(size);
1645 if (ev) {
1646 ev->header.type = PERF_RECORD_EVENT_UPDATE;
1647 ev->header.size = (u16)size;
1648 ev->type = type;
1649 ev->id = id;
1650 }
1651 return ev;
1652}
1653
1654int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1655 perf_event__handler_t process)
1656{
1657 size_t size = strlen(evsel->unit);
1658 struct perf_record_event_update *ev;
1659 int err;
1660
1661 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1662 if (ev == NULL)
1663 return -ENOMEM;
1664
1665 strlcpy(ev->data, evsel->unit, size + 1);
1666 err = process(tool, (union perf_event *)ev, NULL, NULL);
1667 free(ev);
1668 return err;
1669}
1670
1671int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1672 perf_event__handler_t process)
1673{
1674 struct perf_record_event_update *ev;
1675 struct perf_record_event_update_scale *ev_data;
1676 int err;
1677
1678 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1679 if (ev == NULL)
1680 return -ENOMEM;
1681
1682 ev_data = (struct perf_record_event_update_scale *)ev->data;
1683 ev_data->scale = evsel->scale;
1684 err = process(tool, (union perf_event *)ev, NULL, NULL);
1685 free(ev);
1686 return err;
1687}
1688
1689int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
1690 perf_event__handler_t process)
1691{
1692 struct perf_record_event_update *ev;
1693 size_t len = strlen(evsel->name);
1694 int err;
1695
1696 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
1697 if (ev == NULL)
1698 return -ENOMEM;
1699
1700 strlcpy(ev->data, evsel->name, len + 1);
1701 err = process(tool, (union perf_event *)ev, NULL, NULL);
1702 free(ev);
1703 return err;
1704}
1705
1706int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
1707 perf_event__handler_t process)
1708{
1709 size_t size = sizeof(struct perf_record_event_update);
1710 struct perf_record_event_update *ev;
1711 int max, err;
1712 u16 type;
1713
1714 if (!evsel->core.own_cpus)
1715 return 0;
1716
1717 ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
1718 if (!ev)
1719 return -ENOMEM;
1720
1721 ev->header.type = PERF_RECORD_EVENT_UPDATE;
1722 ev->header.size = (u16)size;
1723 ev->type = PERF_EVENT_UPDATE__CPUS;
1724 ev->id = evsel->core.id[0];
1725
1726 cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
1727 evsel->core.own_cpus, type, max);
1728
1729 err = process(tool, (union perf_event *)ev, NULL, NULL);
1730 free(ev);
1731 return err;
1732}
1733
1734int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
1735 perf_event__handler_t process)
1736{
1737 struct evsel *evsel;
1738 int err = 0;
1739
1740 evlist__for_each_entry(evlist, evsel) {
1741 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
1742 evsel->core.id, process);
1743 if (err) {
1744 pr_debug("failed to create perf header attribute\n");
1745 return err;
1746 }
1747 }
1748
1749 return err;
1750}
1751
1752static bool has_unit(struct evsel *evsel)
1753{
1754 return evsel->unit && *evsel->unit;
1755}
1756
1757static bool has_scale(struct evsel *evsel)
1758{
1759 return evsel->scale != 1;
1760}
1761
1762int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
1763 perf_event__handler_t process, bool is_pipe)
1764{
1765 struct evsel *evsel;
1766 int err;
1767
1768 /*
1769 * Synthesize other events stuff not carried within
1770 * attr event - unit, scale, name
1771 */
1772 evlist__for_each_entry(evsel_list, evsel) {
1773 if (!evsel->supported)
1774 continue;
1775
1776 /*
1777 * Synthesize unit and scale only if it's defined.
1778 */
1779 if (has_unit(evsel)) {
1780 err = perf_event__synthesize_event_update_unit(tool, evsel, process);
1781 if (err < 0) {
1782 pr_err("Couldn't synthesize evsel unit.\n");
1783 return err;
1784 }
1785 }
1786
1787 if (has_scale(evsel)) {
1788 err = perf_event__synthesize_event_update_scale(tool, evsel, process);
1789 if (err < 0) {
1790 pr_err("Couldn't synthesize evsel scale.\n");
1791 return err;
1792 }
1793 }
1794
1795 if (evsel->core.own_cpus) {
1796 err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
1797 if (err < 0) {
1798 pr_err("Couldn't synthesize evsel cpus.\n");
1799 return err;
1800 }
1801 }
1802
1803 /*
1804 * Name is needed only for pipe output,
1805 * perf.data carries event names.
1806 */
1807 if (is_pipe) {
1808 err = perf_event__synthesize_event_update_name(tool, evsel, process);
1809 if (err < 0) {
1810 pr_err("Couldn't synthesize evsel name.\n");
1811 return err;
1812 }
1813 }
1814 }
1815 return 0;
1816}
1817
1818int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
1819 u32 ids, u64 *id, perf_event__handler_t process)
1820{
1821 union perf_event *ev;
1822 size_t size;
1823 int err;
1824
1825 size = sizeof(struct perf_event_attr);
1826 size = PERF_ALIGN(size, sizeof(u64));
1827 size += sizeof(struct perf_event_header);
1828 size += ids * sizeof(u64);
1829
1830 ev = zalloc(size);
1831
1832 if (ev == NULL)
1833 return -ENOMEM;
1834
1835 ev->attr.attr = *attr;
1836 memcpy(ev->attr.id, id, ids * sizeof(u64));
1837
1838 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
1839 ev->attr.header.size = (u16)size;
1840
1841 if (ev->attr.header.size == size)
1842 err = process(tool, ev, NULL, NULL);
1843 else
1844 err = -E2BIG;
1845
1846 free(ev);
1847
1848 return err;
1849}
1850
1851int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
1852 perf_event__handler_t process)
1853{
1854 union perf_event ev;
1855 struct tracing_data *tdata;
1856 ssize_t size = 0, aligned_size = 0, padding;
1857 struct feat_fd ff;
1858
1859 /*
1860 * We are going to store the size of the data followed
1861 * by the data contents. Since the fd descriptor is a pipe,
1862 * we cannot seek back to store the size of the data once
1863 * we know it. Instead we:
1864 *
1865 * - write the tracing data to the temp file
1866 * - get/write the data size to pipe
1867 * - write the tracing data from the temp file
1868 * to the pipe
1869 */
1870 tdata = tracing_data_get(&evlist->core.entries, fd, true);
1871 if (!tdata)
1872 return -1;
1873
1874 memset(&ev, 0, sizeof(ev));
1875
1876 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
1877 size = tdata->size;
1878 aligned_size = PERF_ALIGN(size, sizeof(u64));
1879 padding = aligned_size - size;
1880 ev.tracing_data.header.size = sizeof(ev.tracing_data);
1881 ev.tracing_data.size = aligned_size;
1882
1883 process(tool, &ev, NULL, NULL);
1884
1885 /*
1886 * The put function will copy all the tracing data
1887 * stored in temp file to the pipe.
1888 */
1889 tracing_data_put(tdata);
1890
1891 ff = (struct feat_fd){ .fd = fd };
1892 if (write_padded(&ff, NULL, 0, padding))
1893 return -1;
1894
1895 return aligned_size;
1896}
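/*
 * Resulting pipe layout (assuming the flow described above): a
 * PERF_RECORD_HEADER_TRACING_DATA header carrying the aligned size, the
 * raw tracing data copied from the temp file by tracing_data_put(), then
 * up to 7 bytes of padding from write_padded() to keep 8-byte alignment.
 */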
1897
1898int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
1899 perf_event__handler_t process, struct machine *machine)
1900{
1901 union perf_event ev;
1902 size_t len;
1903
1904 if (!pos->hit)
1905 return 0;
1906
1907 memset(&ev, 0, sizeof(ev));
1908
1909 len = pos->long_name_len + 1;
1910 len = PERF_ALIGN(len, NAME_ALIGN);
1911 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
1912 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
1913 ev.build_id.header.misc = misc;
1914 ev.build_id.pid = machine->pid;
1915 ev.build_id.header.size = sizeof(ev.build_id) + len;
1916 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
1917
1918 return process(tool, &ev, NULL, machine);
1919}
1920
1921int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
1922 struct evlist *evlist, perf_event__handler_t process, bool attrs)
1923{
1924 int err;
1925
1926 if (attrs) {
1927 err = perf_event__synthesize_attrs(tool, evlist, process);
1928 if (err < 0) {
1929 pr_err("Couldn't synthesize attrs.\n");
1930 return err;
1931 }
1932 }
1933
1934 err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
1935 err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
1936 if (err < 0) {
1937 pr_err("Couldn't synthesize thread map.\n");
1938 return err;
1939 }
1940
1941 err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
1942 if (err < 0) {
1943 pr_err("Couldn't synthesize cpu map.\n");
1944 return err;
1945 }
1946
1947 err = perf_event__synthesize_stat_config(tool, config, process, NULL);
1948 if (err < 0) {
1949 pr_err("Couldn't synthesize config.\n");
1950 return err;
1951 }
1952
1953 return 0;
1954}
1955
1956int __weak perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
1957 struct perf_tool *tool __maybe_unused,
1958 perf_event__handler_t process __maybe_unused,
1959 struct machine *machine __maybe_unused)
1960{
1961 return 0;
1962}
1963
1964extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
1965
1966int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
1967 struct evlist *evlist, perf_event__handler_t process)
1968{
1969 struct perf_header *header = &session->header;
1970 struct perf_record_header_feature *fe;
1971 struct feat_fd ff;
1972 size_t sz, sz_hdr;
1973 int feat, ret;
1974
1975 sz_hdr = sizeof(fe->header);
1976 sz = sizeof(union perf_event);
1977 /* get a nice alignment */
1978 sz = PERF_ALIGN(sz, page_size);
1979
1980 memset(&ff, 0, sizeof(ff));
1981
1982 ff.buf = malloc(sz);
1983 if (!ff.buf)
1984 return -ENOMEM;
1985
1986 ff.size = sz - sz_hdr;
1987 ff.ph = &session->header;
1988
1989 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
1990 if (!feat_ops[feat].synthesize) {
1991 pr_debug("No record header feature for header :%d\n", feat);
1992 continue;
1993 }
1994
1995 ff.offset = sizeof(*fe);
1996
1997 ret = feat_ops[feat].write(&ff, evlist);
1998 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
1999 pr_debug("Error writing feature\n");
2000 continue;
2001 }
2002 /* ff.buf may have changed due to realloc in do_write() */
2003 fe = ff.buf;
2004 memset(fe, 0, sizeof(*fe));
2005
2006 fe->feat_id = feat;
2007 fe->header.type = PERF_RECORD_HEADER_FEATURE;
2008 fe->header.size = ff.offset;
2009
2010 ret = process(tool, ff.buf, NULL, NULL);
2011 if (ret) {
2012 free(ff.buf);
2013 return ret;
2014 }
2015 }
2016
2017 /* Send HEADER_LAST_FEATURE mark. */
2018 fe = ff.buf;
2019 fe->feat_id = HEADER_LAST_FEATURE;
2020 fe->header.type = PERF_RECORD_HEADER_FEATURE;
2021 fe->header.size = sizeof(*fe);
2022
2023 ret = process(tool, ff.buf, NULL, NULL);
2024
2025 free(ff.buf);
2026 return ret;
2027}