blob: 5df4ca91bed346050727e6de6394bc582b84cbcb [file] [log] [blame]
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001/*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
Arnaldo Carvalho de Meloa8c9ae12011-11-05 08:41:51 -02009#include "util.h"
Borislav Petkov85c66be2013-02-20 16:32:30 +010010#include <lk/debugfs.h>
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -020011#include <poll.h>
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020012#include "cpumap.h"
13#include "thread_map.h"
Namhyung Kim12864b32012-04-26 14:15:22 +090014#include "target.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020015#include "evlist.h"
16#include "evsel.h"
Adrian Huntere3e1a542013-08-14 15:48:24 +030017#include "debug.h"
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -020018#include <unistd.h>
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020019
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -020020#include "parse-events.h"
21
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020022#include <sys/mman.h>
23
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -020024#include <linux/bitops.h>
25#include <linux/hash.h>
26
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020027#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -030028#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020029
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -020030void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
31 struct thread_map *threads)
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020032{
33 int i;
34
35 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
36 INIT_HLIST_HEAD(&evlist->heads[i]);
37 INIT_LIST_HEAD(&evlist->entries);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -020038 perf_evlist__set_maps(evlist, cpus, threads);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -020039 evlist->workload.pid = -1;
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020040}
41
Namhyung Kim334fe7a2013-03-11 16:43:12 +090042struct perf_evlist *perf_evlist__new(void)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020043{
44 struct perf_evlist *evlist = zalloc(sizeof(*evlist));
45
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020046 if (evlist != NULL)
Namhyung Kim334fe7a2013-03-11 16:43:12 +090047 perf_evlist__init(evlist, NULL, NULL);
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020048
49 return evlist;
50}
51
Adrian Hunter75562572013-08-27 11:23:09 +030052/**
53 * perf_evlist__set_id_pos - set the positions of event ids.
54 * @evlist: selected event list
55 *
56 * Events with compatible sample types all have the same id_pos
57 * and is_pos. For convenience, put a copy on evlist.
58 */
59void perf_evlist__set_id_pos(struct perf_evlist *evlist)
60{
61 struct perf_evsel *first = perf_evlist__first(evlist);
62
63 evlist->id_pos = first->id_pos;
64 evlist->is_pos = first->is_pos;
65}
66
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020067static void perf_evlist__purge(struct perf_evlist *evlist)
68{
69 struct perf_evsel *pos, *n;
70
71 list_for_each_entry_safe(pos, n, &evlist->entries, node) {
72 list_del_init(&pos->node);
73 perf_evsel__delete(pos);
74 }
75
76 evlist->nr_entries = 0;
77}
78
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020079void perf_evlist__exit(struct perf_evlist *evlist)
80{
81 free(evlist->mmap);
82 free(evlist->pollfd);
83 evlist->mmap = NULL;
84 evlist->pollfd = NULL;
85}
86
/* Tear down the whole evlist: its evsels, its arrays, then itself. */
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
93
94void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
95{
96 list_add_tail(&entry->node, &evlist->entries);
Adrian Hunter75562572013-08-27 11:23:09 +030097 if (!evlist->nr_entries++)
98 perf_evlist__set_id_pos(evlist);
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020099}
100
Jiri Olsa0529bc12012-01-27 15:34:20 +0100101void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
102 struct list_head *list,
103 int nr_entries)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200104{
Adrian Hunter75562572013-08-27 11:23:09 +0300105 bool set_id_pos = !evlist->nr_entries;
106
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200107 list_splice_tail(list, &evlist->entries);
108 evlist->nr_entries += nr_entries;
Adrian Hunter75562572013-08-27 11:23:09 +0300109 if (set_id_pos)
110 perf_evlist__set_id_pos(evlist);
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200111}
112
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300113void __perf_evlist__set_leader(struct list_head *list)
114{
115 struct perf_evsel *evsel, *leader;
116
117 leader = list_entry(list->next, struct perf_evsel, node);
Namhyung Kim97f63e42013-01-22 18:09:29 +0900118 evsel = list_entry(list->prev, struct perf_evsel, node);
119
120 leader->nr_members = evsel->idx - leader->idx + 1;
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300121
122 list_for_each_entry(evsel, list, node) {
Stephane Eranian74b21332013-01-31 13:54:37 +0100123 evsel->leader = leader;
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300124 }
125}
126
127void perf_evlist__set_leader(struct perf_evlist *evlist)
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200128{
Namhyung Kim97f63e42013-01-22 18:09:29 +0900129 if (evlist->nr_entries) {
130 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300131 __perf_evlist__set_leader(&evlist->entries);
Namhyung Kim97f63e42013-01-22 18:09:29 +0900132 }
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200133}
134
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200135int perf_evlist__add_default(struct perf_evlist *evlist)
136{
137 struct perf_event_attr attr = {
138 .type = PERF_TYPE_HARDWARE,
139 .config = PERF_COUNT_HW_CPU_CYCLES,
140 };
Joerg Roedel1aed2672012-01-04 17:54:20 +0100141 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200142
Joerg Roedel1aed2672012-01-04 17:54:20 +0100143 event_attr_init(&attr);
144
145 evsel = perf_evsel__new(&attr, 0);
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200146 if (evsel == NULL)
Stephane Eraniancc2d86b2011-06-07 18:19:36 +0200147 goto error;
148
149 /* use strdup() because free(evsel) assumes name is allocated */
150 evsel->name = strdup("cycles");
151 if (!evsel->name)
152 goto error_free;
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200153
154 perf_evlist__add(evlist, evsel);
155 return 0;
Stephane Eraniancc2d86b2011-06-07 18:19:36 +0200156error_free:
157 perf_evsel__delete(evsel);
158error:
159 return -ENOMEM;
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200160}
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200161
Arnaldo Carvalho de Meloe60fc842012-10-03 11:50:55 -0300162static int perf_evlist__add_attrs(struct perf_evlist *evlist,
163 struct perf_event_attr *attrs, size_t nr_attrs)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200164{
165 struct perf_evsel *evsel, *n;
166 LIST_HEAD(head);
167 size_t i;
168
169 for (i = 0; i < nr_attrs; i++) {
170 evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
171 if (evsel == NULL)
172 goto out_delete_partial_list;
173 list_add_tail(&evsel->node, &head);
174 }
175
176 perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
177
178 return 0;
179
180out_delete_partial_list:
181 list_for_each_entry_safe(evsel, n, &head, node)
182 perf_evsel__delete(evsel);
183 return -1;
184}
185
Arnaldo Carvalho de Melo79695e12012-05-30 13:53:54 -0300186int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
187 struct perf_event_attr *attrs, size_t nr_attrs)
188{
189 size_t i;
190
191 for (i = 0; i < nr_attrs; i++)
192 event_attr_init(attrs + i);
193
194 return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
195}
196
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -0300197struct perf_evsel *
198perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
Arnaldo Carvalho de Meloee29be62011-11-28 17:57:40 -0200199{
200 struct perf_evsel *evsel;
201
202 list_for_each_entry(evsel, &evlist->entries, node) {
203 if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
204 (int)evsel->attr.config == id)
205 return evsel;
206 }
207
208 return NULL;
209}
210
David Aherna2f28042013-08-28 22:29:51 -0600211struct perf_evsel *
212perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
213 const char *name)
214{
215 struct perf_evsel *evsel;
216
217 list_for_each_entry(evsel, &evlist->entries, node) {
218 if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
219 (strcmp(evsel->name, name) == 0))
220 return evsel;
221 }
222
223 return NULL;
224}
225
Arnaldo Carvalho de Melo39876e72012-10-03 11:40:22 -0300226int perf_evlist__add_newtp(struct perf_evlist *evlist,
227 const char *sys, const char *name, void *handler)
228{
229 struct perf_evsel *evsel;
230
231 evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
232 if (evsel == NULL)
233 return -1;
234
235 evsel->handler.func = handler;
236 perf_evlist__add(evlist, evsel);
237 return 0;
238}
239
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300240void perf_evlist__disable(struct perf_evlist *evlist)
241{
242 int cpu, thread;
243 struct perf_evsel *pos;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900244 int nr_cpus = cpu_map__nr(evlist->cpus);
245 int nr_threads = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300246
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900247 for (cpu = 0; cpu < nr_cpus; cpu++) {
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300248 list_for_each_entry(pos, &evlist->entries, node) {
Namhyung Kim823254e2012-11-29 15:38:30 +0900249 if (!perf_evsel__is_group_leader(pos))
Jiri Olsa3fe4430d2012-11-12 18:34:03 +0100250 continue;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900251 for (thread = 0; thread < nr_threads; thread++)
Namhyung Kim55da8002012-05-31 14:51:46 +0900252 ioctl(FD(pos, cpu, thread),
253 PERF_EVENT_IOC_DISABLE, 0);
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300254 }
255 }
256}
257
David Ahern764e16a32011-08-25 10:17:55 -0600258void perf_evlist__enable(struct perf_evlist *evlist)
259{
260 int cpu, thread;
261 struct perf_evsel *pos;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900262 int nr_cpus = cpu_map__nr(evlist->cpus);
263 int nr_threads = thread_map__nr(evlist->threads);
David Ahern764e16a32011-08-25 10:17:55 -0600264
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900265 for (cpu = 0; cpu < nr_cpus; cpu++) {
David Ahern764e16a32011-08-25 10:17:55 -0600266 list_for_each_entry(pos, &evlist->entries, node) {
Namhyung Kim823254e2012-11-29 15:38:30 +0900267 if (!perf_evsel__is_group_leader(pos))
Jiri Olsa3fe4430d2012-11-12 18:34:03 +0100268 continue;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900269 for (thread = 0; thread < nr_threads; thread++)
Namhyung Kim55da8002012-05-31 14:51:46 +0900270 ioctl(FD(pos, cpu, thread),
271 PERF_EVENT_IOC_ENABLE, 0);
David Ahern764e16a32011-08-25 10:17:55 -0600272 }
273 }
274}
275
Arnaldo Carvalho de Melo806fb632011-11-29 08:05:52 -0200276static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200277{
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900278 int nr_cpus = cpu_map__nr(evlist->cpus);
279 int nr_threads = thread_map__nr(evlist->threads);
280 int nfds = nr_cpus * nr_threads * evlist->nr_entries;
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200281 evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
282 return evlist->pollfd != NULL ? 0 : -ENOMEM;
283}
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -0200284
285void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
286{
287 fcntl(fd, F_SETFL, O_NONBLOCK);
288 evlist->pollfd[evlist->nr_fds].fd = fd;
289 evlist->pollfd[evlist->nr_fds].events = POLLIN;
290 evlist->nr_fds++;
291}
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200292
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300293static void perf_evlist__id_hash(struct perf_evlist *evlist,
294 struct perf_evsel *evsel,
295 int cpu, int thread, u64 id)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200296{
Arnaldo Carvalho de Melo3d3b5e92011-03-04 22:29:39 -0300297 int hash;
298 struct perf_sample_id *sid = SID(evsel, cpu, thread);
299
300 sid->id = id;
301 sid->evsel = evsel;
302 hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
303 hlist_add_head(&sid->node, &evlist->heads[hash]);
304}
305
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300306void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
307 int cpu, int thread, u64 id)
308{
309 perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
310 evsel->id[evsel->ids++] = id;
311}
312
313static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
314 struct perf_evsel *evsel,
315 int cpu, int thread, int fd)
Arnaldo Carvalho de Melo3d3b5e92011-03-04 22:29:39 -0300316{
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200317 u64 read_data[4] = { 0, };
Arnaldo Carvalho de Melo3d3b5e92011-03-04 22:29:39 -0300318 int id_idx = 1; /* The first entry is the counter value */
Jiri Olsae2b5abe2012-04-04 19:32:27 +0200319 u64 id;
320 int ret;
321
322 ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
323 if (!ret)
324 goto add;
325
326 if (errno != ENOTTY)
327 return -1;
328
329 /* Legacy way to get event id.. All hail to old kernels! */
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200330
Jiri Olsac4861af2012-10-12 13:02:21 +0200331 /*
332 * This way does not work with group format read, so bail
333 * out in that case.
334 */
335 if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
336 return -1;
337
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200338 if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
339 read(fd, &read_data, sizeof(read_data)) == -1)
340 return -1;
341
342 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
343 ++id_idx;
344 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
345 ++id_idx;
346
Jiri Olsae2b5abe2012-04-04 19:32:27 +0200347 id = read_data[id_idx];
348
349 add:
350 perf_evlist__id_add(evlist, evsel, cpu, thread, id);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200351 return 0;
352}
353
Jiri Olsa932a3592012-10-11 14:10:35 +0200354struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200355{
356 struct hlist_head *head;
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200357 struct perf_sample_id *sid;
358 int hash;
359
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200360 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
361 head = &evlist->heads[hash];
362
Sasha Levinb67bfe02013-02-27 17:06:00 -0800363 hlist_for_each_entry(sid, head, node)
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200364 if (sid->id == id)
Jiri Olsa932a3592012-10-11 14:10:35 +0200365 return sid;
366
367 return NULL;
368}
369
370struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
371{
372 struct perf_sample_id *sid;
373
374 if (evlist->nr_entries == 1)
375 return perf_evlist__first(evlist);
376
377 sid = perf_evlist__id2sid(evlist, id);
378 if (sid)
379 return sid->evsel;
Namhyung Kim30e68bc2012-02-20 10:47:26 +0900380
381 if (!perf_evlist__sample_id_all(evlist))
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300382 return perf_evlist__first(evlist);
Namhyung Kim30e68bc2012-02-20 10:47:26 +0900383
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200384 return NULL;
385}
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200386
Adrian Hunter75562572013-08-27 11:23:09 +0300387static int perf_evlist__event2id(struct perf_evlist *evlist,
388 union perf_event *event, u64 *id)
389{
390 const u64 *array = event->sample.array;
391 ssize_t n;
392
393 n = (event->header.size - sizeof(event->header)) >> 3;
394
395 if (event->header.type == PERF_RECORD_SAMPLE) {
396 if (evlist->id_pos >= n)
397 return -1;
398 *id = array[evlist->id_pos];
399 } else {
400 if (evlist->is_pos > n)
401 return -1;
402 n -= evlist->is_pos;
403 *id = array[n];
404 }
405 return 0;
406}
407
408static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
409 union perf_event *event)
410{
411 struct hlist_head *head;
412 struct perf_sample_id *sid;
413 int hash;
414 u64 id;
415
416 if (evlist->nr_entries == 1)
417 return perf_evlist__first(evlist);
418
419 if (perf_evlist__event2id(evlist, event, &id))
420 return NULL;
421
422 /* Synthesized events have an id of zero */
423 if (!id)
424 return perf_evlist__first(evlist);
425
426 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
427 head = &evlist->heads[hash];
428
429 hlist_for_each_entry(sid, head, node) {
430 if (sid->id == id)
431 return sid->evsel;
432 }
433 return NULL;
434}
435
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300436union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200437{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300438 struct perf_mmap *md = &evlist->mmap[idx];
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200439 unsigned int head = perf_mmap__read_head(md);
440 unsigned int old = md->prev;
441 unsigned char *data = md->base + page_size;
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -0200442 union perf_event *event = NULL;
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200443
Arnaldo Carvalho de Melo7bb41152011-01-29 09:08:13 -0200444 if (evlist->overwrite) {
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200445 /*
Arnaldo Carvalho de Melo7bb41152011-01-29 09:08:13 -0200446 * If we're further behind than half the buffer, there's a chance
447 * the writer will bite our tail and mess up the samples under us.
448 *
449 * If we somehow ended up ahead of the head, we got messed up.
450 *
451 * In either case, truncate and restart at head.
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200452 */
Arnaldo Carvalho de Melo7bb41152011-01-29 09:08:13 -0200453 int diff = head - old;
454 if (diff > md->mask / 2 || diff < 0) {
455 fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
456
457 /*
458 * head points to a known good entry, start there.
459 */
460 old = head;
461 }
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200462 }
463
464 if (old != head) {
465 size_t size;
466
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -0200467 event = (union perf_event *)&data[old & md->mask];
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200468 size = event->header.size;
469
470 /*
471 * Event straddles the mmap boundary -- header should always
472 * be inside due to u64 alignment of output.
473 */
474 if ((old & md->mask) + size != ((old + size) & md->mask)) {
475 unsigned int offset = old;
476 unsigned int len = min(sizeof(*event), size), cpy;
David Ahern0479b8b2013-02-05 14:12:42 -0700477 void *dst = &md->event_copy;
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200478
479 do {
480 cpy = min(md->mask + 1 - (offset & md->mask), len);
481 memcpy(dst, &data[offset & md->mask], cpy);
482 offset += cpy;
483 dst += cpy;
484 len -= cpy;
485 } while (len);
486
David Ahern0479b8b2013-02-05 14:12:42 -0700487 event = &md->event_copy;
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200488 }
489
490 old += size;
491 }
492
493 md->prev = old;
Arnaldo Carvalho de Melo7bb41152011-01-29 09:08:13 -0200494
495 if (!evlist->overwrite)
496 perf_mmap__write_tail(md, old);
497
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200498 return event;
499}
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200500
Adrian Hunter93edcbd2013-07-04 16:20:26 +0300501static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
502{
503 if (evlist->mmap[idx].base != NULL) {
504 munmap(evlist->mmap[idx].base, evlist->mmap_len);
505 evlist->mmap[idx].base = NULL;
506 }
507}
508
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200509void perf_evlist__munmap(struct perf_evlist *evlist)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200510{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300511 int i;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200512
Adrian Hunter93edcbd2013-07-04 16:20:26 +0300513 for (i = 0; i < evlist->nr_mmaps; i++)
514 __perf_evlist__munmap(evlist, i);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300515
516 free(evlist->mmap);
517 evlist->mmap = NULL;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200518}
519
Arnaldo Carvalho de Melo806fb632011-11-29 08:05:52 -0200520static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200521{
Arnaldo Carvalho de Meloa14bb7a2012-09-26 12:41:14 -0300522 evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
Sukadev Bhattiproluec1e7e42013-05-22 17:42:38 -0700523 if (cpu_map__empty(evlist->cpus))
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900524 evlist->nr_mmaps = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300525 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200526 return evlist->mmap != NULL ? 0 : -ENOMEM;
527}
528
Arnaldo Carvalho de Melobccdaba2011-06-02 10:39:43 -0300529static int __perf_evlist__mmap(struct perf_evlist *evlist,
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300530 int idx, int prot, int mask, int fd)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200531{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300532 evlist->mmap[idx].prev = 0;
533 evlist->mmap[idx].mask = mask;
534 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200535 MAP_SHARED, fd, 0);
Nelson Elhage301b1952011-12-19 08:39:30 -0500536 if (evlist->mmap[idx].base == MAP_FAILED) {
537 evlist->mmap[idx].base = NULL;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200538 return -1;
Nelson Elhage301b1952011-12-19 08:39:30 -0500539 }
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200540
541 perf_evlist__add_pollfd(evlist, fd);
542 return 0;
543}
544
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300545static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
546{
547 struct perf_evsel *evsel;
548 int cpu, thread;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900549 int nr_cpus = cpu_map__nr(evlist->cpus);
550 int nr_threads = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300551
Adrian Huntere3e1a542013-08-14 15:48:24 +0300552 pr_debug2("perf event ring buffer mmapped per cpu\n");
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900553 for (cpu = 0; cpu < nr_cpus; cpu++) {
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300554 int output = -1;
555
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900556 for (thread = 0; thread < nr_threads; thread++) {
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300557 list_for_each_entry(evsel, &evlist->entries, node) {
558 int fd = FD(evsel, cpu, thread);
559
560 if (output == -1) {
561 output = fd;
Arnaldo Carvalho de Melobccdaba2011-06-02 10:39:43 -0300562 if (__perf_evlist__mmap(evlist, cpu,
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300563 prot, mask, output) < 0)
564 goto out_unmap;
565 } else {
566 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
567 goto out_unmap;
568 }
569
570 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
571 perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
572 goto out_unmap;
573 }
574 }
575 }
576
577 return 0;
578
579out_unmap:
Adrian Hunter93edcbd2013-07-04 16:20:26 +0300580 for (cpu = 0; cpu < nr_cpus; cpu++)
581 __perf_evlist__munmap(evlist, cpu);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300582 return -1;
583}
584
585static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
586{
587 struct perf_evsel *evsel;
588 int thread;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900589 int nr_threads = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300590
Adrian Huntere3e1a542013-08-14 15:48:24 +0300591 pr_debug2("perf event ring buffer mmapped per thread\n");
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900592 for (thread = 0; thread < nr_threads; thread++) {
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300593 int output = -1;
594
595 list_for_each_entry(evsel, &evlist->entries, node) {
596 int fd = FD(evsel, 0, thread);
597
598 if (output == -1) {
599 output = fd;
Arnaldo Carvalho de Melobccdaba2011-06-02 10:39:43 -0300600 if (__perf_evlist__mmap(evlist, thread,
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300601 prot, mask, output) < 0)
602 goto out_unmap;
603 } else {
604 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
605 goto out_unmap;
606 }
607
608 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
609 perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
610 goto out_unmap;
611 }
612 }
613
614 return 0;
615
616out_unmap:
Adrian Hunter93edcbd2013-07-04 16:20:26 +0300617 for (thread = 0; thread < nr_threads; thread++)
618 __perf_evlist__munmap(evlist, thread);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300619 return -1;
620}
621
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200622/** perf_evlist__mmap - Create per cpu maps to receive events
623 *
624 * @evlist - list of events
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200625 * @pages - map length in pages
626 * @overwrite - overwrite older events?
627 *
628 * If overwrite is false the user needs to signal event consuption using:
629 *
630 * struct perf_mmap *m = &evlist->mmap[cpu];
631 * unsigned int head = perf_mmap__read_head(m);
632 *
633 * perf_mmap__write_tail(m, head)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200634 *
635 * Using perf_evlist__read_on_cpu does this automatically.
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200636 */
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -0200637int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
638 bool overwrite)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200639{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300640 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200641 const struct cpu_map *cpus = evlist->cpus;
642 const struct thread_map *threads = evlist->threads;
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -0200643 int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
644
645 /* 512 kiB: default amount of unprivileged mlocked memory */
646 if (pages == UINT_MAX)
647 pages = (512 * 1024) / page_size;
Nelson Elhage41d0d932011-12-19 08:39:32 -0500648 else if (!is_power_of_2(pages))
649 return -EINVAL;
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -0200650
651 mask = pages * page_size - 1;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200652
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200653 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200654 return -ENOMEM;
655
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200656 if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200657 return -ENOMEM;
658
659 evlist->overwrite = overwrite;
660 evlist->mmap_len = (pages + 1) * page_size;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200661
662 list_for_each_entry(evsel, &evlist->entries, node) {
663 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300664 evsel->sample_id == NULL &&
Arnaldo Carvalho de Meloa14bb7a2012-09-26 12:41:14 -0300665 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200666 return -ENOMEM;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200667 }
668
Sukadev Bhattiproluec1e7e42013-05-22 17:42:38 -0700669 if (cpu_map__empty(cpus))
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300670 return perf_evlist__mmap_per_thread(evlist, prot, mask);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200671
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300672 return perf_evlist__mmap_per_cpu(evlist, prot, mask);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200673}
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200674
Namhyung Kimb809ac12012-04-26 14:15:19 +0900675int perf_evlist__create_maps(struct perf_evlist *evlist,
676 struct perf_target *target)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200677{
Namhyung Kimb809ac12012-04-26 14:15:19 +0900678 evlist->threads = thread_map__new_str(target->pid, target->tid,
679 target->uid);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200680
681 if (evlist->threads == NULL)
682 return -1;
683
Namhyung Kim879d77d2012-05-16 18:45:48 +0900684 if (perf_target__has_task(target))
Namhyung Kimd67356e2012-05-07 14:09:03 +0900685 evlist->cpus = cpu_map__dummy_new();
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +0900686 else if (!perf_target__has_cpu(target) && !target->uses_mmap)
687 evlist->cpus = cpu_map__dummy_new();
Namhyung Kim879d77d2012-05-16 18:45:48 +0900688 else
689 evlist->cpus = cpu_map__new(target->cpu_list);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200690
691 if (evlist->cpus == NULL)
692 goto out_delete_threads;
693
694 return 0;
695
696out_delete_threads:
697 thread_map__delete(evlist->threads);
698 return -1;
699}
700
701void perf_evlist__delete_maps(struct perf_evlist *evlist)
702{
703 cpu_map__delete(evlist->cpus);
704 thread_map__delete(evlist->threads);
705 evlist->cpus = NULL;
706 evlist->threads = NULL;
707}
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100708
Arnaldo Carvalho de Melo1491a632012-09-26 14:43:13 -0300709int perf_evlist__apply_filters(struct perf_evlist *evlist)
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100710{
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100711 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300712 int err = 0;
713 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900714 nthreads = thread_map__nr(evlist->threads);
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100715
716 list_for_each_entry(evsel, &evlist->entries, node) {
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300717 if (evsel->filter == NULL)
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100718 continue;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300719
720 err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
721 if (err)
722 break;
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100723 }
724
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300725 return err;
726}
727
728int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
729{
730 struct perf_evsel *evsel;
731 int err = 0;
732 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900733 nthreads = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300734
735 list_for_each_entry(evsel, &evlist->entries, node) {
736 err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
737 if (err)
738 break;
739 }
740
741 return err;
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100742}
Frederic Weisbecker74429962011-05-21 17:49:00 +0200743
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300744bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +0200745{
Adrian Hunter75562572013-08-27 11:23:09 +0300746 struct perf_evsel *pos;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300747
Adrian Hunter75562572013-08-27 11:23:09 +0300748 if (evlist->nr_entries == 1)
749 return true;
750
751 if (evlist->id_pos < 0 || evlist->is_pos < 0)
752 return false;
753
754 list_for_each_entry(pos, &evlist->entries, node) {
755 if (pos->id_pos != evlist->id_pos ||
756 pos->is_pos != evlist->is_pos)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300757 return false;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200758 }
759
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300760 return true;
761}
762
Adrian Hunter75562572013-08-27 11:23:09 +0300763u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300764{
Adrian Hunter75562572013-08-27 11:23:09 +0300765 struct perf_evsel *evsel;
766
767 if (evlist->combined_sample_type)
768 return evlist->combined_sample_type;
769
770 list_for_each_entry(evsel, &evlist->entries, node)
771 evlist->combined_sample_type |= evsel->attr.sample_type;
772
773 return evlist->combined_sample_type;
774}
775
776u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
777{
778 evlist->combined_sample_type = 0;
779 return __perf_evlist__combined_sample_type(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300780}
781
Jiri Olsa9ede4732012-10-10 17:38:13 +0200782bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
783{
784 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
785 u64 read_format = first->attr.read_format;
786 u64 sample_type = first->attr.sample_type;
787
788 list_for_each_entry_continue(pos, &evlist->entries, node) {
789 if (read_format != pos->attr.read_format)
790 return false;
791 }
792
793 /* PERF_SAMPLE_READ imples PERF_FORMAT_ID. */
794 if ((sample_type & PERF_SAMPLE_READ) &&
795 !(read_format & PERF_FORMAT_ID)) {
796 return false;
797 }
798
799 return true;
800}
801
802u64 perf_evlist__read_format(struct perf_evlist *evlist)
803{
804 struct perf_evsel *first = perf_evlist__first(evlist);
805 return first->attr.read_format;
806}
807
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300808u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200809{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300810 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200811 struct perf_sample *data;
812 u64 sample_type;
813 u16 size = 0;
814
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200815 if (!first->attr.sample_id_all)
816 goto out;
817
818 sample_type = first->attr.sample_type;
819
820 if (sample_type & PERF_SAMPLE_TID)
821 size += sizeof(data->tid) * 2;
822
823 if (sample_type & PERF_SAMPLE_TIME)
824 size += sizeof(data->time);
825
826 if (sample_type & PERF_SAMPLE_ID)
827 size += sizeof(data->id);
828
829 if (sample_type & PERF_SAMPLE_STREAM_ID)
830 size += sizeof(data->stream_id);
831
832 if (sample_type & PERF_SAMPLE_CPU)
833 size += sizeof(data->cpu) * 2;
Adrian Hunter75562572013-08-27 11:23:09 +0300834
835 if (sample_type & PERF_SAMPLE_IDENTIFIER)
836 size += sizeof(data->id);
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200837out:
838 return size;
839}
840
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300841bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300842{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300843 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300844
845 list_for_each_entry_continue(pos, &evlist->entries, node) {
846 if (first->attr.sample_id_all != pos->attr.sample_id_all)
847 return false;
848 }
849
850 return true;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200851}
852
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300853bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +0200854{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300855 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300856 return first->attr.sample_id_all;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200857}
Arnaldo Carvalho de Melo81cce8d2011-10-05 19:11:32 -0300858
/*
 * Record @evsel as the evlist's currently selected entry.  Only stores
 * the pointer; no validation or refcounting is done here.
 */
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200864
Namhyung Kima74b4b62013-03-15 14:48:48 +0900865void perf_evlist__close(struct perf_evlist *evlist)
866{
867 struct perf_evsel *evsel;
868 int ncpus = cpu_map__nr(evlist->cpus);
869 int nthreads = thread_map__nr(evlist->threads);
870
871 list_for_each_entry_reverse(evsel, &evlist->entries, node)
872 perf_evsel__close(evsel, ncpus, nthreads);
873}
874
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200875int perf_evlist__open(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200876{
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200877 struct perf_evsel *evsel;
Namhyung Kima74b4b62013-03-15 14:48:48 +0900878 int err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200879
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200880 list_for_each_entry(evsel, &evlist->entries, node) {
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200881 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200882 if (err < 0)
883 goto out_err;
884 }
885
886 return 0;
887out_err:
Namhyung Kima74b4b62013-03-15 14:48:48 +0900888 perf_evlist__close(evlist);
Namhyung Kim41c21a62012-02-23 12:13:36 +0900889 errno = -err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200890 return err;
891}
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200892
893int perf_evlist__prepare_workload(struct perf_evlist *evlist,
Namhyung Kim6ef73ec2013-03-11 16:43:15 +0900894 struct perf_target *target,
Namhyung Kim55e162e2013-03-11 16:43:17 +0900895 const char *argv[], bool pipe_output,
896 bool want_signal)
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200897{
898 int child_ready_pipe[2], go_pipe[2];
899 char bf;
900
901 if (pipe(child_ready_pipe) < 0) {
902 perror("failed to create 'ready' pipe");
903 return -1;
904 }
905
906 if (pipe(go_pipe) < 0) {
907 perror("failed to create 'go' pipe");
908 goto out_close_ready_pipe;
909 }
910
911 evlist->workload.pid = fork();
912 if (evlist->workload.pid < 0) {
913 perror("failed to fork");
914 goto out_close_pipes;
915 }
916
917 if (!evlist->workload.pid) {
Namhyung Kim119fa3c2013-03-11 16:43:16 +0900918 if (pipe_output)
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200919 dup2(2, 1);
920
David Ahern0817df02013-05-25 17:50:39 -0600921 signal(SIGTERM, SIG_DFL);
922
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200923 close(child_ready_pipe[0]);
924 close(go_pipe[1]);
925 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
926
927 /*
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200928 * Tell the parent we're ready to go
929 */
930 close(child_ready_pipe[1]);
931
932 /*
933 * Wait until the parent tells us to go.
934 */
935 if (read(go_pipe[0], &bf, 1) == -1)
936 perror("unable to read pipe");
937
938 execvp(argv[0], (char **)argv);
939
940 perror(argv[0]);
Namhyung Kim55e162e2013-03-11 16:43:17 +0900941 if (want_signal)
942 kill(getppid(), SIGUSR1);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200943 exit(-1);
944 }
945
Namhyung Kim6ef73ec2013-03-11 16:43:15 +0900946 if (perf_target__none(target))
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200947 evlist->threads->map[0] = evlist->workload.pid;
948
949 close(child_ready_pipe[1]);
950 close(go_pipe[0]);
951 /*
952 * wait for child to settle
953 */
954 if (read(child_ready_pipe[0], &bf, 1) == -1) {
955 perror("unable to read pipe");
956 goto out_close_pipes;
957 }
958
Namhyung Kimbcf31452013-06-26 16:14:15 +0900959 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200960 evlist->workload.cork_fd = go_pipe[1];
961 close(child_ready_pipe[0]);
962 return 0;
963
964out_close_pipes:
965 close(go_pipe[0]);
966 close(go_pipe[1]);
967out_close_ready_pipe:
968 close(child_ready_pipe[0]);
969 close(child_ready_pipe[1]);
970 return -1;
971}
972
973int perf_evlist__start_workload(struct perf_evlist *evlist)
974{
975 if (evlist->workload.cork_fd > 0) {
David Ahernb3824402013-07-02 13:27:21 -0600976 char bf = 0;
Namhyung Kimbcf31452013-06-26 16:14:15 +0900977 int ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200978 /*
979 * Remove the cork, let it rip!
980 */
Namhyung Kimbcf31452013-06-26 16:14:15 +0900981 ret = write(evlist->workload.cork_fd, &bf, 1);
982 if (ret < 0)
983 perror("enable to write to pipe");
984
985 close(evlist->workload.cork_fd);
986 return ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200987 }
988
989 return 0;
990}
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -0300991
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -0300992int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -0300993 struct perf_sample *sample)
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -0300994{
Adrian Hunter75562572013-08-27 11:23:09 +0300995 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
996
997 if (!evsel)
998 return -EFAULT;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -0300999 return perf_evsel__parse_sample(evsel, event, sample);
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001000}
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001001
1002size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1003{
1004 struct perf_evsel *evsel;
1005 size_t printed = 0;
1006
1007 list_for_each_entry(evsel, &evlist->entries, node) {
1008 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1009 perf_evsel__name(evsel));
1010 }
1011
1012 return printed + fprintf(fp, "\n");;
1013}