blob: b8727ae45e3b9dc959734f1cd4bc9fc888a6dc4b [file] [log] [blame]
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001/*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
Arnaldo Carvalho de Meloa8c9ae12011-11-05 08:41:51 -02009#include "util.h"
Borislav Petkov85c66be2013-02-20 16:32:30 +010010#include <lk/debugfs.h>
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -020011#include <poll.h>
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020012#include "cpumap.h"
13#include "thread_map.h"
Namhyung Kim12864b32012-04-26 14:15:22 +090014#include "target.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020015#include "evlist.h"
16#include "evsel.h"
Adrian Huntere3e1a542013-08-14 15:48:24 +030017#include "debug.h"
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -020018#include <unistd.h>
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020019
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -020020#include "parse-events.h"
21
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020022#include <sys/mman.h>
23
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -020024#include <linux/bitops.h>
25#include <linux/hash.h>
26
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020027#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -030028#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020029
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -020030void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
31 struct thread_map *threads)
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020032{
33 int i;
34
35 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
36 INIT_HLIST_HEAD(&evlist->heads[i]);
37 INIT_LIST_HEAD(&evlist->entries);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -020038 perf_evlist__set_maps(evlist, cpus, threads);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -020039 evlist->workload.pid = -1;
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020040}
41
Namhyung Kim334fe7a2013-03-11 16:43:12 +090042struct perf_evlist *perf_evlist__new(void)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020043{
44 struct perf_evlist *evlist = zalloc(sizeof(*evlist));
45
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020046 if (evlist != NULL)
Namhyung Kim334fe7a2013-03-11 16:43:12 +090047 perf_evlist__init(evlist, NULL, NULL);
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020048
49 return evlist;
50}
51
Adrian Hunter75562572013-08-27 11:23:09 +030052/**
53 * perf_evlist__set_id_pos - set the positions of event ids.
54 * @evlist: selected event list
55 *
56 * Events with compatible sample types all have the same id_pos
57 * and is_pos. For convenience, put a copy on evlist.
58 */
59void perf_evlist__set_id_pos(struct perf_evlist *evlist)
60{
61 struct perf_evsel *first = perf_evlist__first(evlist);
62
63 evlist->id_pos = first->id_pos;
64 evlist->is_pos = first->is_pos;
65}
66
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020067static void perf_evlist__purge(struct perf_evlist *evlist)
68{
69 struct perf_evsel *pos, *n;
70
71 list_for_each_entry_safe(pos, n, &evlist->entries, node) {
72 list_del_init(&pos->node);
73 perf_evsel__delete(pos);
74 }
75
76 evlist->nr_entries = 0;
77}
78
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020079void perf_evlist__exit(struct perf_evlist *evlist)
80{
81 free(evlist->mmap);
82 free(evlist->pollfd);
83 evlist->mmap = NULL;
84 evlist->pollfd = NULL;
85}
86
/* Destroy an evlist: delete all entries, release its arrays, free the struct. */
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
93
94void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
95{
96 list_add_tail(&entry->node, &evlist->entries);
Adrian Hunter75562572013-08-27 11:23:09 +030097 if (!evlist->nr_entries++)
98 perf_evlist__set_id_pos(evlist);
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020099}
100
Jiri Olsa0529bc12012-01-27 15:34:20 +0100101void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
102 struct list_head *list,
103 int nr_entries)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200104{
Adrian Hunter75562572013-08-27 11:23:09 +0300105 bool set_id_pos = !evlist->nr_entries;
106
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200107 list_splice_tail(list, &evlist->entries);
108 evlist->nr_entries += nr_entries;
Adrian Hunter75562572013-08-27 11:23:09 +0300109 if (set_id_pos)
110 perf_evlist__set_id_pos(evlist);
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200111}
112
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300113void __perf_evlist__set_leader(struct list_head *list)
114{
115 struct perf_evsel *evsel, *leader;
116
117 leader = list_entry(list->next, struct perf_evsel, node);
Namhyung Kim97f63e42013-01-22 18:09:29 +0900118 evsel = list_entry(list->prev, struct perf_evsel, node);
119
120 leader->nr_members = evsel->idx - leader->idx + 1;
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300121
122 list_for_each_entry(evsel, list, node) {
Stephane Eranian74b21332013-01-31 13:54:37 +0100123 evsel->leader = leader;
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300124 }
125}
126
127void perf_evlist__set_leader(struct perf_evlist *evlist)
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200128{
Namhyung Kim97f63e42013-01-22 18:09:29 +0900129 if (evlist->nr_entries) {
130 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
Arnaldo Carvalho de Melo63dab222012-08-14 16:35:48 -0300131 __perf_evlist__set_leader(&evlist->entries);
Namhyung Kim97f63e42013-01-22 18:09:29 +0900132 }
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200133}
134
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200135int perf_evlist__add_default(struct perf_evlist *evlist)
136{
137 struct perf_event_attr attr = {
138 .type = PERF_TYPE_HARDWARE,
139 .config = PERF_COUNT_HW_CPU_CYCLES,
140 };
Joerg Roedel1aed2672012-01-04 17:54:20 +0100141 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200142
Joerg Roedel1aed2672012-01-04 17:54:20 +0100143 event_attr_init(&attr);
144
145 evsel = perf_evsel__new(&attr, 0);
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200146 if (evsel == NULL)
Stephane Eraniancc2d86b2011-06-07 18:19:36 +0200147 goto error;
148
149 /* use strdup() because free(evsel) assumes name is allocated */
150 evsel->name = strdup("cycles");
151 if (!evsel->name)
152 goto error_free;
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200153
154 perf_evlist__add(evlist, evsel);
155 return 0;
Stephane Eraniancc2d86b2011-06-07 18:19:36 +0200156error_free:
157 perf_evsel__delete(evsel);
158error:
159 return -ENOMEM;
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200160}
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200161
Arnaldo Carvalho de Meloe60fc842012-10-03 11:50:55 -0300162static int perf_evlist__add_attrs(struct perf_evlist *evlist,
163 struct perf_event_attr *attrs, size_t nr_attrs)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200164{
165 struct perf_evsel *evsel, *n;
166 LIST_HEAD(head);
167 size_t i;
168
169 for (i = 0; i < nr_attrs; i++) {
170 evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
171 if (evsel == NULL)
172 goto out_delete_partial_list;
173 list_add_tail(&evsel->node, &head);
174 }
175
176 perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
177
178 return 0;
179
180out_delete_partial_list:
181 list_for_each_entry_safe(evsel, n, &head, node)
182 perf_evsel__delete(evsel);
183 return -1;
184}
185
Arnaldo Carvalho de Melo79695e12012-05-30 13:53:54 -0300186int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
187 struct perf_event_attr *attrs, size_t nr_attrs)
188{
189 size_t i;
190
191 for (i = 0; i < nr_attrs; i++)
192 event_attr_init(attrs + i);
193
194 return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
195}
196
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -0300197struct perf_evsel *
198perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
Arnaldo Carvalho de Meloee29be62011-11-28 17:57:40 -0200199{
200 struct perf_evsel *evsel;
201
202 list_for_each_entry(evsel, &evlist->entries, node) {
203 if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
204 (int)evsel->attr.config == id)
205 return evsel;
206 }
207
208 return NULL;
209}
210
David Aherna2f28042013-08-28 22:29:51 -0600211struct perf_evsel *
212perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
213 const char *name)
214{
215 struct perf_evsel *evsel;
216
217 list_for_each_entry(evsel, &evlist->entries, node) {
218 if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
219 (strcmp(evsel->name, name) == 0))
220 return evsel;
221 }
222
223 return NULL;
224}
225
Arnaldo Carvalho de Melo39876e72012-10-03 11:40:22 -0300226int perf_evlist__add_newtp(struct perf_evlist *evlist,
227 const char *sys, const char *name, void *handler)
228{
229 struct perf_evsel *evsel;
230
231 evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
232 if (evsel == NULL)
233 return -1;
234
235 evsel->handler.func = handler;
236 perf_evlist__add(evlist, evsel);
237 return 0;
238}
239
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300240void perf_evlist__disable(struct perf_evlist *evlist)
241{
242 int cpu, thread;
243 struct perf_evsel *pos;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900244 int nr_cpus = cpu_map__nr(evlist->cpus);
245 int nr_threads = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300246
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900247 for (cpu = 0; cpu < nr_cpus; cpu++) {
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300248 list_for_each_entry(pos, &evlist->entries, node) {
Adrian Hunter395c3072013-08-31 21:50:53 +0300249 if (!perf_evsel__is_group_leader(pos) || !pos->fd)
Jiri Olsa3fe4430d2012-11-12 18:34:03 +0100250 continue;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900251 for (thread = 0; thread < nr_threads; thread++)
Namhyung Kim55da8002012-05-31 14:51:46 +0900252 ioctl(FD(pos, cpu, thread),
253 PERF_EVENT_IOC_DISABLE, 0);
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300254 }
255 }
256}
257
David Ahern764e16a32011-08-25 10:17:55 -0600258void perf_evlist__enable(struct perf_evlist *evlist)
259{
260 int cpu, thread;
261 struct perf_evsel *pos;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900262 int nr_cpus = cpu_map__nr(evlist->cpus);
263 int nr_threads = thread_map__nr(evlist->threads);
David Ahern764e16a32011-08-25 10:17:55 -0600264
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900265 for (cpu = 0; cpu < nr_cpus; cpu++) {
David Ahern764e16a32011-08-25 10:17:55 -0600266 list_for_each_entry(pos, &evlist->entries, node) {
Adrian Hunter395c3072013-08-31 21:50:53 +0300267 if (!perf_evsel__is_group_leader(pos) || !pos->fd)
Jiri Olsa3fe4430d2012-11-12 18:34:03 +0100268 continue;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900269 for (thread = 0; thread < nr_threads; thread++)
Namhyung Kim55da8002012-05-31 14:51:46 +0900270 ioctl(FD(pos, cpu, thread),
271 PERF_EVENT_IOC_ENABLE, 0);
David Ahern764e16a32011-08-25 10:17:55 -0600272 }
273 }
274}
275
Adrian Hunter395c3072013-08-31 21:50:53 +0300276int perf_evlist__disable_event(struct perf_evlist *evlist,
277 struct perf_evsel *evsel)
278{
279 int cpu, thread, err;
280
281 if (!evsel->fd)
282 return 0;
283
284 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
285 for (thread = 0; thread < evlist->threads->nr; thread++) {
286 err = ioctl(FD(evsel, cpu, thread),
287 PERF_EVENT_IOC_DISABLE, 0);
288 if (err)
289 return err;
290 }
291 }
292 return 0;
293}
294
295int perf_evlist__enable_event(struct perf_evlist *evlist,
296 struct perf_evsel *evsel)
297{
298 int cpu, thread, err;
299
300 if (!evsel->fd)
301 return -EINVAL;
302
303 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
304 for (thread = 0; thread < evlist->threads->nr; thread++) {
305 err = ioctl(FD(evsel, cpu, thread),
306 PERF_EVENT_IOC_ENABLE, 0);
307 if (err)
308 return err;
309 }
310 }
311 return 0;
312}
313
Arnaldo Carvalho de Melo806fb632011-11-29 08:05:52 -0200314static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200315{
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900316 int nr_cpus = cpu_map__nr(evlist->cpus);
317 int nr_threads = thread_map__nr(evlist->threads);
318 int nfds = nr_cpus * nr_threads * evlist->nr_entries;
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200319 evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
320 return evlist->pollfd != NULL ? 0 : -ENOMEM;
321}
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -0200322
323void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
324{
325 fcntl(fd, F_SETFL, O_NONBLOCK);
326 evlist->pollfd[evlist->nr_fds].fd = fd;
327 evlist->pollfd[evlist->nr_fds].events = POLLIN;
328 evlist->nr_fds++;
329}
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200330
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300331static void perf_evlist__id_hash(struct perf_evlist *evlist,
332 struct perf_evsel *evsel,
333 int cpu, int thread, u64 id)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200334{
Arnaldo Carvalho de Melo3d3b5e92011-03-04 22:29:39 -0300335 int hash;
336 struct perf_sample_id *sid = SID(evsel, cpu, thread);
337
338 sid->id = id;
339 sid->evsel = evsel;
340 hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
341 hlist_add_head(&sid->node, &evlist->heads[hash]);
342}
343
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300344void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
345 int cpu, int thread, u64 id)
346{
347 perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
348 evsel->id[evsel->ids++] = id;
349}
350
351static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
352 struct perf_evsel *evsel,
353 int cpu, int thread, int fd)
Arnaldo Carvalho de Melo3d3b5e92011-03-04 22:29:39 -0300354{
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200355 u64 read_data[4] = { 0, };
Arnaldo Carvalho de Melo3d3b5e92011-03-04 22:29:39 -0300356 int id_idx = 1; /* The first entry is the counter value */
Jiri Olsae2b5abe2012-04-04 19:32:27 +0200357 u64 id;
358 int ret;
359
360 ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
361 if (!ret)
362 goto add;
363
364 if (errno != ENOTTY)
365 return -1;
366
367 /* Legacy way to get event id.. All hail to old kernels! */
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200368
Jiri Olsac4861af2012-10-12 13:02:21 +0200369 /*
370 * This way does not work with group format read, so bail
371 * out in that case.
372 */
373 if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
374 return -1;
375
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200376 if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
377 read(fd, &read_data, sizeof(read_data)) == -1)
378 return -1;
379
380 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
381 ++id_idx;
382 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
383 ++id_idx;
384
Jiri Olsae2b5abe2012-04-04 19:32:27 +0200385 id = read_data[id_idx];
386
387 add:
388 perf_evlist__id_add(evlist, evsel, cpu, thread, id);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200389 return 0;
390}
391
Jiri Olsa932a3592012-10-11 14:10:35 +0200392struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200393{
394 struct hlist_head *head;
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200395 struct perf_sample_id *sid;
396 int hash;
397
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200398 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
399 head = &evlist->heads[hash];
400
Sasha Levinb67bfe02013-02-27 17:06:00 -0800401 hlist_for_each_entry(sid, head, node)
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200402 if (sid->id == id)
Jiri Olsa932a3592012-10-11 14:10:35 +0200403 return sid;
404
405 return NULL;
406}
407
408struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
409{
410 struct perf_sample_id *sid;
411
412 if (evlist->nr_entries == 1)
413 return perf_evlist__first(evlist);
414
415 sid = perf_evlist__id2sid(evlist, id);
416 if (sid)
417 return sid->evsel;
Namhyung Kim30e68bc2012-02-20 10:47:26 +0900418
419 if (!perf_evlist__sample_id_all(evlist))
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300420 return perf_evlist__first(evlist);
Namhyung Kim30e68bc2012-02-20 10:47:26 +0900421
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200422 return NULL;
423}
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200424
Adrian Hunter75562572013-08-27 11:23:09 +0300425static int perf_evlist__event2id(struct perf_evlist *evlist,
426 union perf_event *event, u64 *id)
427{
428 const u64 *array = event->sample.array;
429 ssize_t n;
430
431 n = (event->header.size - sizeof(event->header)) >> 3;
432
433 if (event->header.type == PERF_RECORD_SAMPLE) {
434 if (evlist->id_pos >= n)
435 return -1;
436 *id = array[evlist->id_pos];
437 } else {
438 if (evlist->is_pos > n)
439 return -1;
440 n -= evlist->is_pos;
441 *id = array[n];
442 }
443 return 0;
444}
445
446static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
447 union perf_event *event)
448{
449 struct hlist_head *head;
450 struct perf_sample_id *sid;
451 int hash;
452 u64 id;
453
454 if (evlist->nr_entries == 1)
455 return perf_evlist__first(evlist);
456
457 if (perf_evlist__event2id(evlist, event, &id))
458 return NULL;
459
460 /* Synthesized events have an id of zero */
461 if (!id)
462 return perf_evlist__first(evlist);
463
464 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
465 head = &evlist->heads[hash];
466
467 hlist_for_each_entry(sid, head, node) {
468 if (sid->id == id)
469 return sid->evsel;
470 }
471 return NULL;
472}
473
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300474union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200475{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300476 struct perf_mmap *md = &evlist->mmap[idx];
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200477 unsigned int head = perf_mmap__read_head(md);
478 unsigned int old = md->prev;
479 unsigned char *data = md->base + page_size;
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -0200480 union perf_event *event = NULL;
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200481
Arnaldo Carvalho de Melo7bb41152011-01-29 09:08:13 -0200482 if (evlist->overwrite) {
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200483 /*
Arnaldo Carvalho de Melo7bb41152011-01-29 09:08:13 -0200484 * If we're further behind than half the buffer, there's a chance
485 * the writer will bite our tail and mess up the samples under us.
486 *
487 * If we somehow ended up ahead of the head, we got messed up.
488 *
489 * In either case, truncate and restart at head.
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200490 */
Arnaldo Carvalho de Melo7bb41152011-01-29 09:08:13 -0200491 int diff = head - old;
492 if (diff > md->mask / 2 || diff < 0) {
493 fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
494
495 /*
496 * head points to a known good entry, start there.
497 */
498 old = head;
499 }
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200500 }
501
502 if (old != head) {
503 size_t size;
504
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -0200505 event = (union perf_event *)&data[old & md->mask];
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200506 size = event->header.size;
507
508 /*
509 * Event straddles the mmap boundary -- header should always
510 * be inside due to u64 alignment of output.
511 */
512 if ((old & md->mask) + size != ((old + size) & md->mask)) {
513 unsigned int offset = old;
514 unsigned int len = min(sizeof(*event), size), cpy;
David Ahern0479b8b2013-02-05 14:12:42 -0700515 void *dst = &md->event_copy;
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200516
517 do {
518 cpy = min(md->mask + 1 - (offset & md->mask), len);
519 memcpy(dst, &data[offset & md->mask], cpy);
520 offset += cpy;
521 dst += cpy;
522 len -= cpy;
523 } while (len);
524
David Ahern0479b8b2013-02-05 14:12:42 -0700525 event = &md->event_copy;
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200526 }
527
528 old += size;
529 }
530
531 md->prev = old;
Arnaldo Carvalho de Melo7bb41152011-01-29 09:08:13 -0200532
533 if (!evlist->overwrite)
534 perf_mmap__write_tail(md, old);
535
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200536 return event;
537}
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200538
Adrian Hunter93edcbd2013-07-04 16:20:26 +0300539static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
540{
541 if (evlist->mmap[idx].base != NULL) {
542 munmap(evlist->mmap[idx].base, evlist->mmap_len);
543 evlist->mmap[idx].base = NULL;
544 }
545}
546
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200547void perf_evlist__munmap(struct perf_evlist *evlist)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200548{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300549 int i;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200550
Adrian Hunter93edcbd2013-07-04 16:20:26 +0300551 for (i = 0; i < evlist->nr_mmaps; i++)
552 __perf_evlist__munmap(evlist, i);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300553
554 free(evlist->mmap);
555 evlist->mmap = NULL;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200556}
557
Arnaldo Carvalho de Melo806fb632011-11-29 08:05:52 -0200558static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200559{
Arnaldo Carvalho de Meloa14bb7a2012-09-26 12:41:14 -0300560 evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
Sukadev Bhattiproluec1e7e42013-05-22 17:42:38 -0700561 if (cpu_map__empty(evlist->cpus))
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900562 evlist->nr_mmaps = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300563 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200564 return evlist->mmap != NULL ? 0 : -ENOMEM;
565}
566
Arnaldo Carvalho de Melobccdaba2011-06-02 10:39:43 -0300567static int __perf_evlist__mmap(struct perf_evlist *evlist,
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300568 int idx, int prot, int mask, int fd)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200569{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300570 evlist->mmap[idx].prev = 0;
571 evlist->mmap[idx].mask = mask;
572 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200573 MAP_SHARED, fd, 0);
Nelson Elhage301b1952011-12-19 08:39:30 -0500574 if (evlist->mmap[idx].base == MAP_FAILED) {
575 evlist->mmap[idx].base = NULL;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200576 return -1;
Nelson Elhage301b1952011-12-19 08:39:30 -0500577 }
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200578
579 perf_evlist__add_pollfd(evlist, fd);
580 return 0;
581}
582
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300583static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
584{
585 struct perf_evsel *evsel;
586 int cpu, thread;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900587 int nr_cpus = cpu_map__nr(evlist->cpus);
588 int nr_threads = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300589
Adrian Huntere3e1a542013-08-14 15:48:24 +0300590 pr_debug2("perf event ring buffer mmapped per cpu\n");
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900591 for (cpu = 0; cpu < nr_cpus; cpu++) {
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300592 int output = -1;
593
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900594 for (thread = 0; thread < nr_threads; thread++) {
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300595 list_for_each_entry(evsel, &evlist->entries, node) {
596 int fd = FD(evsel, cpu, thread);
597
598 if (output == -1) {
599 output = fd;
Arnaldo Carvalho de Melobccdaba2011-06-02 10:39:43 -0300600 if (__perf_evlist__mmap(evlist, cpu,
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300601 prot, mask, output) < 0)
602 goto out_unmap;
603 } else {
604 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
605 goto out_unmap;
606 }
607
608 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
609 perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
610 goto out_unmap;
611 }
612 }
613 }
614
615 return 0;
616
617out_unmap:
Adrian Hunter93edcbd2013-07-04 16:20:26 +0300618 for (cpu = 0; cpu < nr_cpus; cpu++)
619 __perf_evlist__munmap(evlist, cpu);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300620 return -1;
621}
622
623static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
624{
625 struct perf_evsel *evsel;
626 int thread;
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900627 int nr_threads = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300628
Adrian Huntere3e1a542013-08-14 15:48:24 +0300629 pr_debug2("perf event ring buffer mmapped per thread\n");
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900630 for (thread = 0; thread < nr_threads; thread++) {
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300631 int output = -1;
632
633 list_for_each_entry(evsel, &evlist->entries, node) {
634 int fd = FD(evsel, 0, thread);
635
636 if (output == -1) {
637 output = fd;
Arnaldo Carvalho de Melobccdaba2011-06-02 10:39:43 -0300638 if (__perf_evlist__mmap(evlist, thread,
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300639 prot, mask, output) < 0)
640 goto out_unmap;
641 } else {
642 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
643 goto out_unmap;
644 }
645
646 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
647 perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
648 goto out_unmap;
649 }
650 }
651
652 return 0;
653
654out_unmap:
Adrian Hunter93edcbd2013-07-04 16:20:26 +0300655 for (thread = 0; thread < nr_threads; thread++)
656 __perf_evlist__munmap(evlist, thread);
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300657 return -1;
658}
659
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200660/** perf_evlist__mmap - Create per cpu maps to receive events
661 *
662 * @evlist - list of events
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200663 * @pages - map length in pages
664 * @overwrite - overwrite older events?
665 *
666 * If overwrite is false the user needs to signal event consuption using:
667 *
668 * struct perf_mmap *m = &evlist->mmap[cpu];
669 * unsigned int head = perf_mmap__read_head(m);
670 *
671 * perf_mmap__write_tail(m, head)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200672 *
673 * Using perf_evlist__read_on_cpu does this automatically.
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200674 */
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -0200675int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
676 bool overwrite)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200677{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300678 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200679 const struct cpu_map *cpus = evlist->cpus;
680 const struct thread_map *threads = evlist->threads;
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -0200681 int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
682
683 /* 512 kiB: default amount of unprivileged mlocked memory */
684 if (pages == UINT_MAX)
685 pages = (512 * 1024) / page_size;
Nelson Elhage41d0d932011-12-19 08:39:32 -0500686 else if (!is_power_of_2(pages))
687 return -EINVAL;
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -0200688
689 mask = pages * page_size - 1;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200690
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200691 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200692 return -ENOMEM;
693
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200694 if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200695 return -ENOMEM;
696
697 evlist->overwrite = overwrite;
698 evlist->mmap_len = (pages + 1) * page_size;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200699
700 list_for_each_entry(evsel, &evlist->entries, node) {
701 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300702 evsel->sample_id == NULL &&
Arnaldo Carvalho de Meloa14bb7a2012-09-26 12:41:14 -0300703 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200704 return -ENOMEM;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200705 }
706
Sukadev Bhattiproluec1e7e42013-05-22 17:42:38 -0700707 if (cpu_map__empty(cpus))
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300708 return perf_evlist__mmap_per_thread(evlist, prot, mask);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200709
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300710 return perf_evlist__mmap_per_cpu(evlist, prot, mask);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200711}
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200712
Namhyung Kimb809ac12012-04-26 14:15:19 +0900713int perf_evlist__create_maps(struct perf_evlist *evlist,
714 struct perf_target *target)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200715{
Namhyung Kimb809ac12012-04-26 14:15:19 +0900716 evlist->threads = thread_map__new_str(target->pid, target->tid,
717 target->uid);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200718
719 if (evlist->threads == NULL)
720 return -1;
721
Namhyung Kim879d77d2012-05-16 18:45:48 +0900722 if (perf_target__has_task(target))
Namhyung Kimd67356e2012-05-07 14:09:03 +0900723 evlist->cpus = cpu_map__dummy_new();
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +0900724 else if (!perf_target__has_cpu(target) && !target->uses_mmap)
725 evlist->cpus = cpu_map__dummy_new();
Namhyung Kim879d77d2012-05-16 18:45:48 +0900726 else
727 evlist->cpus = cpu_map__new(target->cpu_list);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200728
729 if (evlist->cpus == NULL)
730 goto out_delete_threads;
731
732 return 0;
733
734out_delete_threads:
735 thread_map__delete(evlist->threads);
736 return -1;
737}
738
739void perf_evlist__delete_maps(struct perf_evlist *evlist)
740{
741 cpu_map__delete(evlist->cpus);
742 thread_map__delete(evlist->threads);
743 evlist->cpus = NULL;
744 evlist->threads = NULL;
745}
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100746
Arnaldo Carvalho de Melo1491a632012-09-26 14:43:13 -0300747int perf_evlist__apply_filters(struct perf_evlist *evlist)
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100748{
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100749 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300750 int err = 0;
751 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900752 nthreads = thread_map__nr(evlist->threads);
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100753
754 list_for_each_entry(evsel, &evlist->entries, node) {
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300755 if (evsel->filter == NULL)
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100756 continue;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300757
758 err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
759 if (err)
760 break;
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100761 }
762
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300763 return err;
764}
765
766int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
767{
768 struct perf_evsel *evsel;
769 int err = 0;
770 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +0900771 nthreads = thread_map__nr(evlist->threads);
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300772
773 list_for_each_entry(evsel, &evlist->entries, node) {
774 err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
775 if (err)
776 break;
777 }
778
779 return err;
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100780}
Frederic Weisbecker74429962011-05-21 17:49:00 +0200781
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300782bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +0200783{
Adrian Hunter75562572013-08-27 11:23:09 +0300784 struct perf_evsel *pos;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300785
Adrian Hunter75562572013-08-27 11:23:09 +0300786 if (evlist->nr_entries == 1)
787 return true;
788
789 if (evlist->id_pos < 0 || evlist->is_pos < 0)
790 return false;
791
792 list_for_each_entry(pos, &evlist->entries, node) {
793 if (pos->id_pos != evlist->id_pos ||
794 pos->is_pos != evlist->is_pos)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300795 return false;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200796 }
797
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300798 return true;
799}
800
Adrian Hunter75562572013-08-27 11:23:09 +0300801u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300802{
Adrian Hunter75562572013-08-27 11:23:09 +0300803 struct perf_evsel *evsel;
804
805 if (evlist->combined_sample_type)
806 return evlist->combined_sample_type;
807
808 list_for_each_entry(evsel, &evlist->entries, node)
809 evlist->combined_sample_type |= evsel->attr.sample_type;
810
811 return evlist->combined_sample_type;
812}
813
/*
 * Recompute the combined sample_type: drop the cached value, then
 * delegate to the caching helper to rebuild it from the entries.
 */
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}
819
/*
 * Validate that all events share the same read_format and that, when
 * PERF_SAMPLE_READ is used, PERF_FORMAT_ID is present so samples can
 * be attributed back to an event.
 */
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	/* Every other event must match the first one's read_format. */
	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
839
840u64 perf_evlist__read_format(struct perf_evlist *evlist)
841{
842 struct perf_evsel *first = perf_evlist__first(evlist);
843 return first->attr.read_format;
844}
845
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300846u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200847{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300848 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200849 struct perf_sample *data;
850 u64 sample_type;
851 u16 size = 0;
852
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200853 if (!first->attr.sample_id_all)
854 goto out;
855
856 sample_type = first->attr.sample_type;
857
858 if (sample_type & PERF_SAMPLE_TID)
859 size += sizeof(data->tid) * 2;
860
861 if (sample_type & PERF_SAMPLE_TIME)
862 size += sizeof(data->time);
863
864 if (sample_type & PERF_SAMPLE_ID)
865 size += sizeof(data->id);
866
867 if (sample_type & PERF_SAMPLE_STREAM_ID)
868 size += sizeof(data->stream_id);
869
870 if (sample_type & PERF_SAMPLE_CPU)
871 size += sizeof(data->cpu) * 2;
Adrian Hunter75562572013-08-27 11:23:09 +0300872
873 if (sample_type & PERF_SAMPLE_IDENTIFIER)
874 size += sizeof(data->id);
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -0200875out:
876 return size;
877}
878
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300879bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300880{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300881 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300882
883 list_for_each_entry_continue(pos, &evlist->entries, node) {
884 if (first->attr.sample_id_all != pos->attr.sample_id_all)
885 return false;
886 }
887
888 return true;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200889}
890
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300891bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +0200892{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -0300893 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300894 return first->attr.sample_id_all;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200895}
Arnaldo Carvalho de Melo81cce8d2011-10-05 19:11:32 -0300896
/* Remember which event is currently selected (e.g. in the top UI). */
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200902
Namhyung Kima74b4b62013-03-15 14:48:48 +0900903void perf_evlist__close(struct perf_evlist *evlist)
904{
905 struct perf_evsel *evsel;
906 int ncpus = cpu_map__nr(evlist->cpus);
907 int nthreads = thread_map__nr(evlist->threads);
908
909 list_for_each_entry_reverse(evsel, &evlist->entries, node)
910 perf_evsel__close(evsel, ncpus, nthreads);
911}
912
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200913int perf_evlist__open(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200914{
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200915 struct perf_evsel *evsel;
Namhyung Kima74b4b62013-03-15 14:48:48 +0900916 int err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200917
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200918 list_for_each_entry(evsel, &evlist->entries, node) {
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200919 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200920 if (err < 0)
921 goto out_err;
922 }
923
924 return 0;
925out_err:
Namhyung Kima74b4b62013-03-15 14:48:48 +0900926 perf_evlist__close(evlist);
Namhyung Kim41c21a62012-02-23 12:13:36 +0900927 errno = -err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200928 return err;
929}
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -0200930
/*
 * Fork the workload described by argv and park it just before exec.
 *
 * Two pipes coordinate parent and child: the child signals readiness
 * by closing its end of child_ready_pipe, then blocks reading go_pipe
 * until perf_evlist__start_workload() writes to (and closes) the
 * "cork" fd.  This lets the parent open/enable events on the child's
 * pid before the workload actually runs.
 *
 * @target:      if it targets nothing specific, the forked pid is
 *               recorded as the first entry of evlist->threads
 * @pipe_output: redirect the child's stdout to stderr (dup2(2, 1))
 * @want_signal: on exec failure, notify the parent with SIGUSR1
 *
 * Returns 0 on success, -1 on error (pipes closed, no child left
 * corked).
 */
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		/* Child: set up fds, announce readiness, wait for the cork. */
		if (pipe_output)
			dup2(2, 1);

		/* Undo any SIGTERM handler inherited from the parent. */
		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		/* Don't leak the "go" read end across the exec below. */
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		/* Only reached if execvp() failed. */
		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	/* No explicit pid/tid/cpu target: monitor the forked child. */
	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	/* Keep the cork fd from leaking into any exec'ed helpers. */
	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
1010
1011int perf_evlist__start_workload(struct perf_evlist *evlist)
1012{
1013 if (evlist->workload.cork_fd > 0) {
David Ahernb3824402013-07-02 13:27:21 -06001014 char bf = 0;
Namhyung Kimbcf31452013-06-26 16:14:15 +09001015 int ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001016 /*
1017 * Remove the cork, let it rip!
1018 */
Namhyung Kimbcf31452013-06-26 16:14:15 +09001019 ret = write(evlist->workload.cork_fd, &bf, 1);
1020 if (ret < 0)
1021 perror("enable to write to pipe");
1022
1023 close(evlist->workload.cork_fd);
1024 return ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001025 }
1026
1027 return 0;
1028}
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001029
/*
 * Resolve the evsel that produced 'event' and let it parse the record
 * into 'sample'.  Returns -EFAULT when the event cannot be matched to
 * any evsel in the list.
 */
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	return evsel ? perf_evsel__parse_sample(evsel, event, sample) : -EFAULT;
}
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001039
1040size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1041{
1042 struct perf_evsel *evsel;
1043 size_t printed = 0;
1044
1045 list_for_each_entry(evsel, &evlist->entries, node) {
1046 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1047 perf_evsel__name(evsel));
1048 }
1049
1050 return printed + fprintf(fp, "\n");;
1051}