blob: 7101283ac3c54de1a24b1f175670d03e483b4364 [file] [log] [blame]
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001/*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
Arnaldo Carvalho de Meloa8c9ae12011-11-05 08:41:51 -02009#include "util.h"
Borislav Petkov85c66be2013-02-20 16:32:30 +010010#include <lk/debugfs.h>
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -020011#include <poll.h>
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020012#include "cpumap.h"
13#include "thread_map.h"
Namhyung Kim12864b32012-04-26 14:15:22 +090014#include "target.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020015#include "evlist.h"
16#include "evsel.h"
Adrian Huntere3e1a542013-08-14 15:48:24 +030017#include "debug.h"
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -020018#include <unistd.h>
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020019
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -020020#include "parse-events.h"
21
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020022#include <sys/mman.h>
23
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -020024#include <linux/bitops.h>
25#include <linux/hash.h>
26
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020027#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -030028#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020029
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -020030void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
31 struct thread_map *threads)
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020032{
33 int i;
34
35 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
36 INIT_HLIST_HEAD(&evlist->heads[i]);
37 INIT_LIST_HEAD(&evlist->entries);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -020038 perf_evlist__set_maps(evlist, cpus, threads);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -020039 evlist->workload.pid = -1;
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020040}
41
Namhyung Kim334fe7a2013-03-11 16:43:12 +090042struct perf_evlist *perf_evlist__new(void)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020043{
44 struct perf_evlist *evlist = zalloc(sizeof(*evlist));
45
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020046 if (evlist != NULL)
Namhyung Kim334fe7a2013-03-11 16:43:12 +090047 perf_evlist__init(evlist, NULL, NULL);
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020048
49 return evlist;
50}
51
Adrian Hunter75562572013-08-27 11:23:09 +030052/**
53 * perf_evlist__set_id_pos - set the positions of event ids.
54 * @evlist: selected event list
55 *
56 * Events with compatible sample types all have the same id_pos
57 * and is_pos. For convenience, put a copy on evlist.
58 */
59void perf_evlist__set_id_pos(struct perf_evlist *evlist)
60{
61 struct perf_evsel *first = perf_evlist__first(evlist);
62
63 evlist->id_pos = first->id_pos;
64 evlist->is_pos = first->is_pos;
65}
66
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020067static void perf_evlist__purge(struct perf_evlist *evlist)
68{
69 struct perf_evsel *pos, *n;
70
71 list_for_each_entry_safe(pos, n, &evlist->entries, node) {
72 list_del_init(&pos->node);
73 perf_evsel__delete(pos);
74 }
75
76 evlist->nr_entries = 0;
77}
78
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020079void perf_evlist__exit(struct perf_evlist *evlist)
80{
81 free(evlist->mmap);
82 free(evlist->pollfd);
83 evlist->mmap = NULL;
84 evlist->pollfd = NULL;
85}
86
/*
 * Destroy the evlist: free every entry, then the internal buffers,
 * then the evlist structure itself.
 */
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
93
94void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
95{
96 list_add_tail(&entry->node, &evlist->entries);
Adrian Hunter75562572013-08-27 11:23:09 +030097 if (!evlist->nr_entries++)
98 perf_evlist__set_id_pos(evlist);
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020099}
100
Jiri Olsa0529bc12012-01-27 15:34:20 +0100101void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
102 struct list_head *list,
103 int nr_entries)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200104{
Adrian Hunter75562572013-08-27 11:23:09 +0300105 bool set_id_pos = !evlist->nr_entries;
106
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200107 list_splice_tail(list, &evlist->entries);
108 evlist->nr_entries += nr_entries;
Adrian Hunter75562572013-08-27 11:23:09 +0300109 if (set_id_pos)
110 perf_evlist__set_id_pos(evlist);
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200111}
112
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300113void __perf_evlist__set_leader(struct list_head *list)
114{
115 struct perf_evsel *evsel, *leader;
116
117 leader = list_entry(list->next, struct perf_evsel, node);
Namhyung Kim97f63e42013-01-22 18:09:29 +0900118 evsel = list_entry(list->prev, struct perf_evsel, node);
119
120 leader->nr_members = evsel->idx - leader->idx + 1;
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300121
122 list_for_each_entry(evsel, list, node) {
Stephane Eranian74b21332013-01-31 13:54:37 +0100123 evsel->leader = leader;
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300124 }
125}
126
127void perf_evlist__set_leader(struct perf_evlist *evlist)
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200128{
Namhyung Kim97f63e42013-01-22 18:09:29 +0900129 if (evlist->nr_entries) {
130 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300131 __perf_evlist__set_leader(&evlist->entries);
Namhyung Kim97f63e42013-01-22 18:09:29 +0900132 }
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200133}
134
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200135int perf_evlist__add_default(struct perf_evlist *evlist)
136{
137 struct perf_event_attr attr = {
138 .type = PERF_TYPE_HARDWARE,
139 .config = PERF_COUNT_HW_CPU_CYCLES,
140 };
Joerg Roedel1aed2672012-01-04 17:54:20 +0100141 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200142
Joerg Roedel1aed2672012-01-04 17:54:20 +0100143 event_attr_init(&attr);
144
145 evsel = perf_evsel__new(&attr, 0);
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200146 if (evsel == NULL)
Stephane Eraniancc2d86b2011-06-07 18:19:36 +0200147 goto error;
148
149 /* use strdup() because free(evsel) assumes name is allocated */
150 evsel->name = strdup("cycles");
151 if (!evsel->name)
152 goto error_free;
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200153
154 perf_evlist__add(evlist, evsel);
155 return 0;
Stephane Eraniancc2d86b2011-06-07 18:19:36 +0200156error_free:
157 perf_evsel__delete(evsel);
158error:
159 return -ENOMEM;
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200160}
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200161
Arnaldo Carvalho de Meloe60fc842012-10-03 11:50:55 -0300162static int perf_evlist__add_attrs(struct perf_evlist *evlist,
163 struct perf_event_attr *attrs, size_t nr_attrs)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200164{
165 struct perf_evsel *evsel, *n;
166 LIST_HEAD(head);
167 size_t i;
168
169 for (i = 0; i < nr_attrs; i++) {
170 evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
171 if (evsel == NULL)
172 goto out_delete_partial_list;
173 list_add_tail(&evsel->node, &head);
174 }
175
176 perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
177
178 return 0;
179
180out_delete_partial_list:
181 list_for_each_entry_safe(evsel, n, &head, node)
182 perf_evsel__delete(evsel);
183 return -1;
184}
185
Arnaldo Carvalho de Melo79695e12012-05-30 13:53:54 -0300186int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
187 struct perf_event_attr *attrs, size_t nr_attrs)
188{
189 size_t i;
190
191 for (i = 0; i < nr_attrs; i++)
192 event_attr_init(attrs + i);
193
194 return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
195}
196
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -0300197struct perf_evsel *
198perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
Arnaldo Carvalho de Meloee29be62011-11-28 17:57:40 -0200199{
200 struct perf_evsel *evsel;
201
202 list_for_each_entry(evsel, &evlist->entries, node) {
203 if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
204 (int)evsel->attr.config == id)
205 return evsel;
206 }
207
208 return NULL;
209}
210
David Aherna2f28042013-08-28 22:29:51 -0600211struct perf_evsel *
212perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
213 const char *name)
214{
215 struct perf_evsel *evsel;
216
217 list_for_each_entry(evsel, &evlist->entries, node) {
218 if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
219 (strcmp(evsel->name, name) == 0))
220 return evsel;
221 }
222
223 return NULL;
224}
225
Arnaldo Carvalho de Melo39876e72012-10-03 11:40:22 -0300226int perf_evlist__add_newtp(struct perf_evlist *evlist,
227 const char *sys, const char *name, void *handler)
228{
229 struct perf_evsel *evsel;
230
231 evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
232 if (evsel == NULL)
233 return -1;
234
235 evsel->handler.func = handler;
236 perf_evlist__add(evlist, evsel);
237 return 0;
238}
239
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300240void perf_evlist__disable(struct perf_evlist *evlist)
241{
242 int cpu, thread;
243 struct perf_evsel *pos;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900244 int nr_cpus = cpu_map__nr(evlist->cpus);
245 int nr_threads = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300246
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900247 for (cpu = 0; cpu < nr_cpus; cpu++) {
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300248 list_for_each_entry(pos, &evlist->entries, node) {
Adrian Hunter395c3072013-08-31 21:50:53 +0300249 if (!perf_evsel__is_group_leader(pos) || !pos->fd)
Jiri Olsa3fe4430d2012-11-12 18:34:03 +0100250 continue;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900251 for (thread = 0; thread < nr_threads; thread++)
Namhyung Kim55da8002012-05-31 14:51:46 +0900252 ioctl(FD(pos, cpu, thread),
253 PERF_EVENT_IOC_DISABLE, 0);
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300254 }
255 }
256}
257
David Ahern764e16a32011-08-25 10:17:55 -0600258void perf_evlist__enable(struct perf_evlist *evlist)
259{
260 int cpu, thread;
261 struct perf_evsel *pos;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900262 int nr_cpus = cpu_map__nr(evlist->cpus);
263 int nr_threads = thread_map__nr(evlist->threads);
David Ahern764e16a32011-08-25 10:17:55 -0600264
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900265 for (cpu = 0; cpu < nr_cpus; cpu++) {
David Ahern764e16a32011-08-25 10:17:55 -0600266 list_for_each_entry(pos, &evlist->entries, node) {
Adrian Hunter395c3072013-08-31 21:50:53 +0300267 if (!perf_evsel__is_group_leader(pos) || !pos->fd)
Jiri Olsa3fe4430d2012-11-12 18:34:03 +0100268 continue;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900269 for (thread = 0; thread < nr_threads; thread++)
Namhyung Kim55da8002012-05-31 14:51:46 +0900270 ioctl(FD(pos, cpu, thread),
271 PERF_EVENT_IOC_ENABLE, 0);
David Ahern764e16a32011-08-25 10:17:55 -0600272 }
273 }
274}
275
Adrian Hunter395c3072013-08-31 21:50:53 +0300276int perf_evlist__disable_event(struct perf_evlist *evlist,
277 struct perf_evsel *evsel)
278{
279 int cpu, thread, err;
280
281 if (!evsel->fd)
282 return 0;
283
284 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
285 for (thread = 0; thread < evlist->threads->nr; thread++) {
286 err = ioctl(FD(evsel, cpu, thread),
287 PERF_EVENT_IOC_DISABLE, 0);
288 if (err)
289 return err;
290 }
291 }
292 return 0;
293}
294
295int perf_evlist__enable_event(struct perf_evlist *evlist,
296 struct perf_evsel *evsel)
297{
298 int cpu, thread, err;
299
300 if (!evsel->fd)
301 return -EINVAL;
302
303 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
304 for (thread = 0; thread < evlist->threads->nr; thread++) {
305 err = ioctl(FD(evsel, cpu, thread),
306 PERF_EVENT_IOC_ENABLE, 0);
307 if (err)
308 return err;
309 }
310 }
311 return 0;
312}
313
Arnaldo Carvalho de Melo806fb632011-11-29 08:05:52 -0200314static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200315{
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900316 int nr_cpus = cpu_map__nr(evlist->cpus);
317 int nr_threads = thread_map__nr(evlist->threads);
318 int nfds = nr_cpus * nr_threads * evlist->nr_entries;
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200319 evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
320 return evlist->pollfd != NULL ? 0 : -ENOMEM;
321}
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -0200322
323void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
324{
325 fcntl(fd, F_SETFL, O_NONBLOCK);
326 evlist->pollfd[evlist->nr_fds].fd = fd;
327 evlist->pollfd[evlist->nr_fds].events = POLLIN;
328 evlist->nr_fds++;
329}
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200330
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300331static void perf_evlist__id_hash(struct perf_evlist *evlist,
332 struct perf_evsel *evsel,
333 int cpu, int thread, u64 id)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200334{
Arnaldo Carvalho de Melo3d3b5e92011-03-04 22:29:39 -0300335 int hash;
336 struct perf_sample_id *sid = SID(evsel, cpu, thread);
337
338 sid->id = id;
339 sid->evsel = evsel;
340 hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
341 hlist_add_head(&sid->node, &evlist->heads[hash]);
342}
343
/*
 * Record @id for (evsel, cpu, thread): hash it into the evlist id
 * lookup table and append it to the evsel's own id array.
 */
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
350
/*
 * Discover and register the event id behind @fd for (evsel, cpu,
 * thread).  Preferred path is the PERF_EVENT_IOC_ID ioctl; when the
 * kernel is too old for it (ENOTTY), fall back to read()ing the
 * counter and picking the id out of the read_format layout — which
 * cannot work with PERF_FORMAT_GROUP, so bail out in that case.
 * Returns 0 on success, -1 on failure.
 */
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	/* any error other than "ioctl unknown" is fatal */
	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	/* skip the optional time fields preceding the id in read_data */
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
391
Jiri Olsa932a3592012-10-11 14:10:35 +0200392struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200393{
394 struct hlist_head *head;
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200395 struct perf_sample_id *sid;
396 int hash;
397
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200398 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
399 head = &evlist->heads[hash];
400
Sasha Levinb67bfe02013-02-27 17:06:00 -0800401 hlist_for_each_entry(sid, head, node)
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200402 if (sid->id == id)
Jiri Olsa932a3592012-10-11 14:10:35 +0200403 return sid;
404
405 return NULL;
406}
407
408struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
409{
410 struct perf_sample_id *sid;
411
412 if (evlist->nr_entries == 1)
413 return perf_evlist__first(evlist);
414
415 sid = perf_evlist__id2sid(evlist, id);
416 if (sid)
417 return sid->evsel;
Namhyung Kim30e68bc2012-02-20 10:47:26 +0900418
419 if (!perf_evlist__sample_id_all(evlist))
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300420 return perf_evlist__first(evlist);
Namhyung Kim30e68bc2012-02-20 10:47:26 +0900421
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200422 return NULL;
423}
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200424
/*
 * Extract the sample id from a raw mmapped event.  For
 * PERF_RECORD_SAMPLE records the id sits id_pos u64 words from the
 * start of the sample array; for other record types (sample_id_all
 * trailer) it sits is_pos words from the end.  Returns 0 and fills in
 * *id, or -1 if the event is too small to hold an id.
 */
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	/* number of u64 words in the payload after the header */
	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		/* is_pos counts back from the end, hence > rather than >= */
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
445
/*
 * Map a raw mmapped event back to the evsel that produced it.  Fast
 * paths: a single-event list always matches, and when the first event
 * does not use sample_id_all, non-sample records carry no id and are
 * credited to the first event.  Otherwise the id is extracted and
 * looked up in the evlist hash; NULL means no match.
 */
static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
478
/*
 * Read the next event from ring buffer @idx, or NULL when drained.
 * In overwrite mode a reader that has fallen too far behind is resynced
 * to the current head; in non-overwrite mode the tail pointer is
 * advanced so the kernel can reuse the consumed space.  Events that
 * wrap past the end of the buffer are reassembled into md->event_copy.
 */
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* data area starts one page past base: first page is the control page */
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			/* copy piecewise, wrapping at the buffer end */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200543
Adrian Hunter93edcbd2013-07-04 16:20:26 +0300544static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
545{
546 if (evlist->mmap[idx].base != NULL) {
547 munmap(evlist->mmap[idx].base, evlist->mmap_len);
548 evlist->mmap[idx].base = NULL;
549 }
550}
551
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200552void perf_evlist__munmap(struct perf_evlist *evlist)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200553{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300554 int i;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200555
Adrian Hunter93edcbd2013-07-04 16:20:26 +0300556 for (i = 0; i < evlist->nr_mmaps; i++)
557 __perf_evlist__munmap(evlist, i);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300558
559 free(evlist->mmap);
560 evlist->mmap = NULL;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200561}
562
Arnaldo Carvalho de Melo806fb632011-11-29 08:05:52 -0200563static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200564{
Arnaldo Carvalho de Meloa14bb7a2012-09-26 12:41:14 -0300565 evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
Sukadev Bhattiproluec1e7e42013-05-22 17:42:38 -0700566 if (cpu_map__empty(evlist->cpus))
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900567 evlist->nr_mmaps = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300568 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200569 return evlist->mmap != NULL ? 0 : -ENOMEM;
570}
571
Arnaldo Carvalho de Melobccdaba2011-06-02 10:39:43 -0300572static int __perf_evlist__mmap(struct perf_evlist *evlist,
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300573 int idx, int prot, int mask, int fd)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200574{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300575 evlist->mmap[idx].prev = 0;
576 evlist->mmap[idx].mask = mask;
577 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200578 MAP_SHARED, fd, 0);
Nelson Elhage301b1952011-12-19 08:39:30 -0500579 if (evlist->mmap[idx].base == MAP_FAILED) {
580 evlist->mmap[idx].base = NULL;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200581 return -1;
Nelson Elhage301b1952011-12-19 08:39:30 -0500582 }
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200583
584 perf_evlist__add_pollfd(evlist, fd);
585 return 0;
586}
587
/*
 * Create one ring buffer per cpu.  The first fd seen on each cpu is
 * mmapped; every other event's fd on that cpu is redirected into it
 * via PERF_EVENT_IOC_SET_OUTPUT.  Event ids are registered so samples
 * can be attributed back to their evsel.  On any failure all buffers
 * are unmapped and -1 is returned.
 */
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					/* first fd on this cpu gets the buffer */
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	/* __perf_evlist__munmap() skips never-mapped entries */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}
627
/*
 * Create one ring buffer per thread (used when there is no real cpu
 * map).  The first event's fd for each thread is mmapped; the other
 * events' fds are redirected into it via PERF_EVENT_IOC_SET_OUTPUT,
 * and ids are registered for sample attribution.  On any failure all
 * buffers are unmapped and -1 is returned.
 */
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			/* cpu index is always 0 with a dummy cpu map */
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	/* __perf_evlist__munmap() skips never-mapped entries */
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}
664
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 * struct perf_mmap *m = &evlist->mmap[cpu];
 * unsigned int head = perf_mmap__read_head(m);
 *
 * perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	/* read-only mapping in overwrite mode: no tail pointer to update */
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	/* power-of-two size lets head/tail wrap via this mask */
	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	/* +1 page for the kernel's control (header) page */
	evlist->mmap_len = (pages + 1) * page_size;

	/* make sure each id-carrying evsel has its sample_id table */
	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200717
Namhyung Kimb809ac12012-04-26 14:15:19 +0900718int perf_evlist__create_maps(struct perf_evlist *evlist,
719 struct perf_target *target)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200720{
Namhyung Kimb809ac12012-04-26 14:15:19 +0900721 evlist->threads = thread_map__new_str(target->pid, target->tid,
722 target->uid);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200723
724 if (evlist->threads == NULL)
725 return -1;
726
Namhyung Kim879d77d2012-05-16 18:45:48 +0900727 if (perf_target__has_task(target))
Namhyung Kimd67356e2012-05-07 14:09:03 +0900728 evlist->cpus = cpu_map__dummy_new();
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +0900729 else if (!perf_target__has_cpu(target) && !target->uses_mmap)
730 evlist->cpus = cpu_map__dummy_new();
Namhyung Kim879d77d2012-05-16 18:45:48 +0900731 else
732 evlist->cpus = cpu_map__new(target->cpu_list);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200733
734 if (evlist->cpus == NULL)
735 goto out_delete_threads;
736
737 return 0;
738
739out_delete_threads:
740 thread_map__delete(evlist->threads);
741 return -1;
742}
743
744void perf_evlist__delete_maps(struct perf_evlist *evlist)
745{
746 cpu_map__delete(evlist->cpus);
747 thread_map__delete(evlist->threads);
748 evlist->cpus = NULL;
749 evlist->threads = NULL;
750}
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100751
Arnaldo Carvalho de Melo1491a632012-09-26 14:43:13 -0300752int perf_evlist__apply_filters(struct perf_evlist *evlist)
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100753{
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100754 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300755 int err = 0;
756 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900757 nthreads = thread_map__nr(evlist->threads);
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100758
759 list_for_each_entry(evsel, &evlist->entries, node) {
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300760 if (evsel->filter == NULL)
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100761 continue;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300762
763 err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
764 if (err)
765 break;
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100766 }
767
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300768 return err;
769}
770
771int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
772{
773 struct perf_evsel *evsel;
774 int err = 0;
775 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900776 nthreads = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300777
778 list_for_each_entry(evsel, &evlist->entries, node) {
779 err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
780 if (err)
781 break;
782 }
783
784 return err;
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100785}
Frederic Weisbecker74429962011-05-21 17:49:00 +0200786
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300787bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +0200788{
Adrian Hunter75562572013-08-27 11:23:09 +0300789 struct perf_evsel *pos;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300790
Adrian Hunter75562572013-08-27 11:23:09 +0300791 if (evlist->nr_entries == 1)
792 return true;
793
794 if (evlist->id_pos < 0 || evlist->is_pos < 0)
795 return false;
796
797 list_for_each_entry(pos, &evlist->entries, node) {
798 if (pos->id_pos != evlist->id_pos ||
799 pos->is_pos != evlist->is_pos)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300800 return false;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200801 }
802
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300803 return true;
804}
805
Adrian Hunter75562572013-08-27 11:23:09 +0300806u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300807{
Adrian Hunter75562572013-08-27 11:23:09 +0300808 struct perf_evsel *evsel;
809
810 if (evlist->combined_sample_type)
811 return evlist->combined_sample_type;
812
813 list_for_each_entry(evsel, &evlist->entries, node)
814 evlist->combined_sample_type |= evsel->attr.sample_type;
815
816 return evlist->combined_sample_type;
817}
818
819u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
820{
821 evlist->combined_sample_type = 0;
822 return __perf_evlist__combined_sample_type(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300823}
824
Jiri Olsa9ede4732012-10-10 17:38:13 +0200825bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
826{
827 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
828 u64 read_format = first->attr.read_format;
829 u64 sample_type = first->attr.sample_type;
830
831 list_for_each_entry_continue(pos, &evlist->entries, node) {
832 if (read_format != pos->attr.read_format)
833 return false;
834 }
835
836 /* PERF_SAMPLE_READ imples PERF_FORMAT_ID. */
837 if ((sample_type & PERF_SAMPLE_READ) &&
838 !(read_format & PERF_FORMAT_ID)) {
839 return false;
840 }
841
842 return true;
843}
844
845u64 perf_evlist__read_format(struct perf_evlist *evlist)
846{
847 struct perf_evsel *first = perf_evlist__first(evlist);
848 return first->attr.read_format;
849}
850
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300851u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200852{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300853 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200854 struct perf_sample *data;
855 u64 sample_type;
856 u16 size = 0;
857
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200858 if (!first->attr.sample_id_all)
859 goto out;
860
861 sample_type = first->attr.sample_type;
862
863 if (sample_type & PERF_SAMPLE_TID)
864 size += sizeof(data->tid) * 2;
865
866 if (sample_type & PERF_SAMPLE_TIME)
867 size += sizeof(data->time);
868
869 if (sample_type & PERF_SAMPLE_ID)
870 size += sizeof(data->id);
871
872 if (sample_type & PERF_SAMPLE_STREAM_ID)
873 size += sizeof(data->stream_id);
874
875 if (sample_type & PERF_SAMPLE_CPU)
876 size += sizeof(data->cpu) * 2;
Adrian Hunter75562572013-08-27 11:23:09 +0300877
878 if (sample_type & PERF_SAMPLE_IDENTIFIER)
879 size += sizeof(data->id);
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200880out:
881 return size;
882}
883
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300884bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300885{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300886 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300887
888 list_for_each_entry_continue(pos, &evlist->entries, node) {
889 if (first->attr.sample_id_all != pos->attr.sample_id_all)
890 return false;
891 }
892
893 return true;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200894}
895
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300896bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +0200897{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300898 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300899 return first->attr.sample_id_all;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200900}
Arnaldo Carvalho de Melo81cce8d2011-10-05 19:11:32 -0300901
902void perf_evlist__set_selected(struct perf_evlist *evlist,
903 struct perf_evsel *evsel)
904{
905 evlist->selected = evsel;
906}
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200907
Namhyung Kima74b4b62013-03-15 14:48:48 +0900908void perf_evlist__close(struct perf_evlist *evlist)
909{
910 struct perf_evsel *evsel;
911 int ncpus = cpu_map__nr(evlist->cpus);
912 int nthreads = thread_map__nr(evlist->threads);
913
914 list_for_each_entry_reverse(evsel, &evlist->entries, node)
915 perf_evsel__close(evsel, ncpus, nthreads);
916}
917
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200918int perf_evlist__open(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200919{
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200920 struct perf_evsel *evsel;
Namhyung Kima74b4b62013-03-15 14:48:48 +0900921 int err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200922
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200923 list_for_each_entry(evsel, &evlist->entries, node) {
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200924 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200925 if (err < 0)
926 goto out_err;
927 }
928
929 return 0;
930out_err:
Namhyung Kima74b4b62013-03-15 14:48:48 +0900931 perf_evlist__close(evlist);
Namhyung Kim41c21a62012-02-23 12:13:36 +0900932 errno = -err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200933 return err;
934}
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200935
/*
 * Fork the workload described by argv, but keep it "corked": the child
 * blocks on a pipe read until perf_evlist__start_workload() writes to
 * it, so events can be opened/enabled before the workload runs.
 *
 * On success returns 0 with the go-pipe write end stashed in
 * evlist->workload.cork_fd and the child's pid in evlist->workload.pid;
 * returns -1 on failure with all pipe fds closed.
 *
 * NOTE(review): the child's exec failure path kills the *parent* with
 * SIGUSR1 when want_signal is set — callers presumably install a
 * handler for that; confirm against callers.
 */
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		/* Child: optionally redirect stdout to stderr's fd. */
		if (pipe_output)
			dup2(2, 1);

		/* Undo any SIGTERM disposition inherited from perf. */
		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		/* Make sure the go pipe doesn't leak into the exec'ed program. */
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		/* Only reached if execvp() failed. */
		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	/* With no explicit target, monitor the freshly forked workload. */
	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	/* Don't leak the cork fd into other children the parent may spawn. */
	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
1015
1016int perf_evlist__start_workload(struct perf_evlist *evlist)
1017{
1018 if (evlist->workload.cork_fd > 0) {
David Ahernb3824402013-07-02 13:27:21 -06001019 char bf = 0;
Namhyung Kimbcf31452013-06-26 16:14:15 +09001020 int ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001021 /*
1022 * Remove the cork, let it rip!
1023 */
Namhyung Kimbcf31452013-06-26 16:14:15 +09001024 ret = write(evlist->workload.cork_fd, &bf, 1);
1025 if (ret < 0)
1026 perror("enable to write to pipe");
1027
1028 close(evlist->workload.cork_fd);
1029 return ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001030 }
1031
1032 return 0;
1033}
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001034
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -03001035int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001036 struct perf_sample *sample)
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001037{
Adrian Hunter75562572013-08-27 11:23:09 +03001038 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1039
1040 if (!evsel)
1041 return -EFAULT;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001042 return perf_evsel__parse_sample(evsel, event, sample);
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001043}
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001044
1045size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1046{
1047 struct perf_evsel *evsel;
1048 size_t printed = 0;
1049
1050 list_for_each_entry(evsel, &evlist->entries, node) {
1051 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1052 perf_evsel__name(evsel));
1053 }
1054
1055 return printed + fprintf(fp, "\n");;
1056}