/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "asm/bug.h"
#include <unistd.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>

static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);

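/*
 * FD() and SID() index the per-evsel xyarrays that hold, respectively, the
 * perf_event_open() file descriptor and the struct perf_sample_id for a
 * given (cpu index, thread index) pair.
 */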
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

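/*
 * Propagate the evlist's cpu and thread maps to each evsel, taking and
 * dropping refcounts so the maps are shared rather than duplicated. An
 * evsel keeps its own PMU-provided cpu map only when the user did not
 * specify a target cpu list.
 */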
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}

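/*
 * Make the first evsel on the list the group leader of all entries on it.
 * nr_members is derived from the idx distance between the first and last
 * entries, which assumes the list members have consecutive idx values.
 */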
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

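/*
 * Probe for the highest precise_ip level (:ppp, i.e. 3) that the kernel
 * and hardware support, by actually trying to open a throwaway event on
 * the current thread and stepping the requested precision down on each
 * failure.
 */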
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
		if (fd != -1) {
			close(fd);
			break;
		}
		--attr->precise_ip;
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = perf_evsel__new_cycles();

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

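/*
 * A system wide evsel is opened once per cpu with a dummy thread map of
 * size one, so for such events only a single thread slot exists.
 */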
static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
				     struct perf_mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

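/*
 * Sample ids are hashed into evlist->heads so that a record's id field can
 * be mapped back to the evsel that produced it, see perf_evlist__id2evsel().
 */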
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id... All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

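/*
 * Extract the id field from an event record: for PERF_RECORD_SAMPLE it sits
 * at id_pos from the start of the sample array, for other record types it
 * sits at is_pos from the end (in the sample_id trailer). Both positions
 * were precomputed from the evlist's common sample_type.
 */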
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

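/*
 * Pause or resume kernel output to the backward (overwrite) ring buffers
 * via PERF_EVENT_IOC_PAUSE_OUTPUT. A backward buffer must be paused before
 * it is read, otherwise the kernel may overwrite records under the reader.
 */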
static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->backward_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->backward_mmap[i].fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
		u64 end, u64 *prev)
{
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & md->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & md->mask) + size != ((start + size) & md->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}

union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
	u64 head;
	u64 old = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);

	return perf_mmap__read(md, check_messup, old, head, &md->prev);
}

union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
	u64 head, end;
	u64 start = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (!head)
		return NULL;

	/*
	 * The 'head' pointer starts at 0 and the kernel subtracts
	 * sizeof(record) from it on each write, so in fact 'head' is
	 * negative. The 'end' pointer is made manually by adding the size
	 * of the ring buffer to the 'head' pointer, meaning the valid data
	 * that can be read is the whole ring buffer. If 'end' is positive,
	 * the ring buffer has not been completely filled, so 'end' must be
	 * adjusted to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0. Here we compare '-head' with the
	 * size of the ring buffer, where -head is the number of bytes the
	 * kernel has written to the ring buffer.
	 */
	if (-head < (u64)(md->mask + 1))
		end = 0;
	else
		end = head + md->mask + 1;

	return perf_mmap__read(md, false, start, end, &md->prev);
}

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * The messup check is required for a forward overwritable ring
	 * buffer: the memory pointed to by md->prev can be overwritten in
	 * that case. It is not needed for a read-write ring buffer: the
	 * kernel stops outputting when it hits md->prev (see
	 * perf_mmap__consume()).
	 */
	return perf_mmap__read_forward(md, evlist->overwrite);
}

union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * No need to check for messup in a backward ring buffer: we can
	 * always read arbitrarily long data from it, as long as we
	 * remember to pause it before reading.
	 */
	return perf_mmap__read_backward(md);
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	return perf_evlist__mmap_read_forward(evlist, idx);
}

void perf_mmap__read_catchup(struct perf_mmap *md)
{
	u64 head;

	if (!refcount_read(&md->refcnt))
		return;

	head = perf_mmap__read_head(md);
	md->prev = head;
}

void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
	perf_mmap__read_catchup(&evlist->mmap[idx]);
}

static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

static void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

static void perf_mmap__put(struct perf_mmap *md)
{
	BUG_ON(md->base && refcount_read(&md->refcnt) == 0);

	if (refcount_dec_and_test(&md->refcnt))
		perf_mmap__munmap(md);
}

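/*
 * Mark the events up to md->prev as consumed: writing the tail lets the
 * kernel reuse that space. In overwrite mode there is no tail to update,
 * so only the refcount handling applies. The final put happens here once
 * all other references are gone and the buffer has been drained.
 */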
void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
	if (!overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_mmap__put(md);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}

static void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->backward_mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->backward_mmap[i]);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
}

static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	int i;
	struct perf_mmap *map;

	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		map[i].fd = -1;
		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_evlist__mmap_consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		refcount_set(&map[i].refcnt, 0);
	}
	return map;
}

struct mmap_params {
	int prot;
	int mask;
	struct auxtrace_mmap_params auxtrace_mp;
};

static int perf_mmap__mmap(struct perf_mmap *map,
			   struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}

static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
{
	if (evsel->attr.write_backward)
		return false;
	return true;
}

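/*
 * Map one ring buffer slot and wire every evsel's fd for this (cpu, thread)
 * into it: the first fd is mmap'ed, the rest are redirected to that buffer
 * with PERF_EVENT_IOC_SET_OUTPUT. Events that write backward get their own
 * buffer in evlist->backward_mmap, allocated lazily on first use.
 */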
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu_idx,
				       int thread, int *_output, int *_output_backward)
{
	struct perf_evsel *evsel;
	int revent;
	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;
		int cpu;

		if (evsel->attr.write_backward) {
			output = _output_backward;
			maps = evlist->backward_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist);
				if (!maps)
					return -1;
				evlist->backward_mmap = maps;
				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
			}
		}

		if (evsel->system_wide && thread)
			continue;

		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid. Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_backward))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_backward))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a value that was good once upon a time: things will
		 * look strange since we can't read the sysctl value, but
		 * let's not die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

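/*
 * The mmap length is the requested number of data pages plus one extra
 * page for the control header (struct perf_event_mmap_page). The number of
 * data pages must be a power of two; UINT_MAX means "use the default",
 * i.e. the largest power of two that fits in kernel/perf_event_mlock_kb.
 */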
size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

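/*
 * Parse a --mmap-pages style argument: either a size with a B/K/M/G
 * suffix, which is converted to pages, or a raw page count. The result is
 * rounded up to the next power of two and rejected if it exceeds max;
 * zero is allowed only when min is zero.
 */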
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

/**
 * perf_evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
Adrian Hunter718c6022015-04-09 18:53:42 +03001298int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1299 bool overwrite, unsigned int auxtrace_pages,
1300 bool auxtrace_overwrite)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001301{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -03001302 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001303 const struct cpu_map *cpus = evlist->cpus;
1304 const struct thread_map *threads = evlist->threads;
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001305 struct mmap_params mp = {
1306 .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
1307 };
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -02001308
Wang Nan8db6d6b2016-07-14 08:34:35 +00001309 if (!evlist->mmap)
1310 evlist->mmap = perf_evlist__alloc_mmap(evlist);
1311 if (!evlist->mmap)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001312 return -ENOMEM;
1313
Arnaldo Carvalho de Melo1b853372014-09-03 18:02:59 -03001314 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001315 return -ENOMEM;
1316
1317 evlist->overwrite = overwrite;
Jiri Olsa994a1f72013-09-01 12:36:12 +02001318 evlist->mmap_len = perf_evlist__mmap_size(pages);
Adrian Hunter2af68ef2013-10-18 15:29:07 +03001319 pr_debug("mmap size %zuB\n", evlist->mmap_len);
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001320 mp.mask = evlist->mmap_len - page_size - 1;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001321
Adrian Hunter718c6022015-04-09 18:53:42 +03001322 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1323 auxtrace_pages, auxtrace_overwrite);
1324
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001325 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001326 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03001327 evsel->sample_id == NULL &&
Arnaldo Carvalho de Meloa14bb7a2012-09-26 12:41:14 -03001328 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001329 return -ENOMEM;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001330 }
1331
Sukadev Bhattiproluec1e7e42013-05-22 17:42:38 -07001332 if (cpu_map__empty(cpus))
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001333 return perf_evlist__mmap_per_thread(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001334
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001335 return perf_evlist__mmap_per_cpu(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001336}
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001337
Adrian Hunter718c6022015-04-09 18:53:42 +03001338int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1339 bool overwrite)
1340{
1341 return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1342}
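/*
 * Minimal usage sketch (illustrative, error handling trimmed): the common
 * sequence is to create maps from a target, open the counters and then mmap
 * the ring buffers before consuming events.
 */
static int __maybe_unused evlist__mmap_usage_sketch(struct perf_evlist *evlist,
						    struct target *target)
{
	if (perf_evlist__create_maps(evlist, target) < 0)
		return -1;

	if (perf_evlist__open(evlist) < 0)
		return -1;

	/*
	 * 128 data pages per ring buffer; overwrite is false, so consumption
	 * must be signalled as described in the comment above.
	 */
	return perf_evlist__mmap(evlist, 128, false);
}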
1343
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001344int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001345{
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001346 struct cpu_map *cpus;
1347 struct thread_map *threads;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001348
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001349 threads = thread_map__new_str(target->pid, target->tid, target->uid);
1350
1351 if (!threads)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001352 return -1;
1353
Dongsheng Yang9c105fb2013-12-04 17:56:40 -05001354 if (target__uses_dummy_map(target))
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001355 cpus = cpu_map__dummy_new();
Namhyung Kim879d77d2012-05-16 18:45:48 +09001356 else
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001357 cpus = cpu_map__new(target->cpu_list);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001358
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001359 if (!cpus)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001360 goto out_delete_threads;
1361
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001362 evlist->has_user_cpus = !!target->cpu_list;
1363
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001364 perf_evlist__set_maps(evlist, cpus, threads);
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001365
1366 return 0;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001367
1368out_delete_threads:
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001369 thread_map__put(threads);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001370 return -1;
1371}
1372
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001373void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1374 struct thread_map *threads)
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001375{
Adrian Hunter934e0f22015-09-08 10:58:56 +03001376 /*
1377	 * Allow for the possibility that one or the other of the maps isn't being
1378	 * changed, i.e. don't put it. Note we are assuming the maps that are
1379 * being applied are brand new and evlist is taking ownership of the
1380 * original reference count of 1. If that is not the case it is up to
1381 * the caller to increase the reference count.
1382 */
1383 if (cpus != evlist->cpus) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001384 cpu_map__put(evlist->cpus);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001385 evlist->cpus = cpu_map__get(cpus);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001386 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001387
Adrian Hunter934e0f22015-09-08 10:58:56 +03001388 if (threads != evlist->threads) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001389 thread_map__put(evlist->threads);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001390 evlist->threads = thread_map__get(threads);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001391 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001392
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001393 perf_evlist__propagate_maps(evlist);
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001394}
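/*
 * Ownership sketch (illustrative): per the comment above, freshly created
 * maps are handed over together with their initial reference, so on success
 * the caller does not put them; it puts them only on error paths before
 * this call, as perf_evlist__create_maps() does.
 */
static int __maybe_unused set_maps_ownership_sketch(struct perf_evlist *evlist)
{
	struct cpu_map *cpus = cpu_map__new(NULL);		/* refcount == 1 */
	struct thread_map *threads = thread_map__new_dummy();	/* refcount == 1 */

	if (!cpus || !threads) {
		cpu_map__put(cpus);	/* both puts are NULL-safe */
		thread_map__put(threads);
		return -ENOMEM;
	}

	perf_evlist__set_maps(evlist, cpus, threads);	/* evlist owns them now */
	return 0;
}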
1395
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001396void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
1397 enum perf_event_sample_format bit)
1398{
1399 struct perf_evsel *evsel;
1400
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001401 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001402 __perf_evsel__set_sample_bit(evsel, bit);
1403}
1404
1405void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
1406 enum perf_event_sample_format bit)
1407{
1408 struct perf_evsel *evsel;
1409
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001410 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001411 __perf_evsel__reset_sample_bit(evsel, bit);
1412}
1413
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001414int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001415{
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001416 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001417 int err = 0;
1418 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +09001419 nthreads = thread_map__nr(evlist->threads);
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001420
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001421 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001422 if (evsel->filter == NULL)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001423 continue;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001424
Kan Liangd988d5e2015-08-21 02:23:14 -04001425 /*
1426	 * Filters only work for tracepoint events, which don't have a cpu limit,
1427	 * so the evlist and evsel cpu maps should always be the same.
1428 */
Arnaldo Carvalho de Melof47805a2015-07-03 15:53:49 -03001429 err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001430 if (err) {
1431 *err_evsel = evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001432 break;
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001433 }
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001434 }
1435
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001436 return err;
1437}
1438
1439int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1440{
1441 struct perf_evsel *evsel;
1442 int err = 0;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001443
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001444 evlist__for_each_entry(evlist, evsel) {
Wang Nanfdf14722016-02-26 09:31:53 +00001445 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1446 continue;
1447
Arnaldo Carvalho de Melo94ad89b2015-07-03 17:42:03 -03001448 err = perf_evsel__set_filter(evsel, filter);
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001449 if (err)
1450 break;
1451 }
1452
1453 return err;
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001454}
Frederic Weisbecker74429962011-05-21 17:49:00 +02001455
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001456int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001457{
1458	char *filter = NULL;
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001459 int ret = -1;
1460 size_t i;
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001461
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001462 for (i = 0; i < npids; ++i) {
1463 if (i == 0) {
1464 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1465 return -1;
1466 } else {
1467 char *tmp;
1468
1469 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1470 goto out_free;
1471
1472 free(filter);
1473 filter = tmp;
1474 }
1475 }
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001476
1477 ret = perf_evlist__set_filter(evlist, filter);
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001478out_free:
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001479 free(filter);
1480 return ret;
1481}
1482
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001483int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1484{
1485 return perf_evlist__set_filter_pids(evlist, 1, &pid);
1486}
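/*
 * Worked example (illustrative): for pids {1, 42} the loop above builds the
 * filter string "common_pid != 1 && common_pid != 42", which is then applied
 * to every tracepoint event in the evlist.
 */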
1487
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001488bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001489{
Adrian Hunter75562572013-08-27 11:23:09 +03001490 struct perf_evsel *pos;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001491
Adrian Hunter75562572013-08-27 11:23:09 +03001492 if (evlist->nr_entries == 1)
1493 return true;
1494
1495 if (evlist->id_pos < 0 || evlist->is_pos < 0)
1496 return false;
1497
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001498 evlist__for_each_entry(evlist, pos) {
Adrian Hunter75562572013-08-27 11:23:09 +03001499 if (pos->id_pos != evlist->id_pos ||
1500 pos->is_pos != evlist->is_pos)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001501 return false;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001502 }
1503
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001504 return true;
1505}
1506
Adrian Hunter75562572013-08-27 11:23:09 +03001507u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001508{
Adrian Hunter75562572013-08-27 11:23:09 +03001509 struct perf_evsel *evsel;
1510
1511 if (evlist->combined_sample_type)
1512 return evlist->combined_sample_type;
1513
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001514 evlist__for_each_entry(evlist, evsel)
Adrian Hunter75562572013-08-27 11:23:09 +03001515 evlist->combined_sample_type |= evsel->attr.sample_type;
1516
1517 return evlist->combined_sample_type;
1518}
1519
1520u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1521{
1522 evlist->combined_sample_type = 0;
1523 return __perf_evlist__combined_sample_type(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001524}
1525
Andi Kleen98df8582015-07-18 08:24:47 -07001526u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1527{
1528 struct perf_evsel *evsel;
1529 u64 branch_type = 0;
1530
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001531 evlist__for_each_entry(evlist, evsel)
Andi Kleen98df8582015-07-18 08:24:47 -07001532 branch_type |= evsel->attr.branch_sample_type;
1533 return branch_type;
1534}
1535
Jiri Olsa9ede4732012-10-10 17:38:13 +02001536bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1537{
1538 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1539 u64 read_format = first->attr.read_format;
1540 u64 sample_type = first->attr.sample_type;
1541
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001542 evlist__for_each_entry(evlist, pos) {
Jiri Olsa9ede4732012-10-10 17:38:13 +02001543 if (read_format != pos->attr.read_format)
1544 return false;
1545 }
1546
1547	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1548 if ((sample_type & PERF_SAMPLE_READ) &&
1549 !(read_format & PERF_FORMAT_ID)) {
1550 return false;
1551 }
1552
1553 return true;
1554}
1555
1556u64 perf_evlist__read_format(struct perf_evlist *evlist)
1557{
1558 struct perf_evsel *first = perf_evlist__first(evlist);
1559 return first->attr.read_format;
1560}
1561
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001562u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -02001563{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001564 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -02001565 struct perf_sample *data;
1566 u64 sample_type;
1567 u16 size = 0;
1568
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -02001569 if (!first->attr.sample_id_all)
1570 goto out;
1571
1572 sample_type = first->attr.sample_type;
1573
1574 if (sample_type & PERF_SAMPLE_TID)
1575 size += sizeof(data->tid) * 2;
1576
1577 if (sample_type & PERF_SAMPLE_TIME)
1578 size += sizeof(data->time);
1579
1580 if (sample_type & PERF_SAMPLE_ID)
1581 size += sizeof(data->id);
1582
1583 if (sample_type & PERF_SAMPLE_STREAM_ID)
1584 size += sizeof(data->stream_id);
1585
1586 if (sample_type & PERF_SAMPLE_CPU)
1587 size += sizeof(data->cpu) * 2;
Adrian Hunter75562572013-08-27 11:23:09 +03001588
1589 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1590 size += sizeof(data->id);
Arnaldo Carvalho de Melo81e36bff2011-11-11 22:28:50 -02001591out:
1592 return size;
1593}
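/*
 * Worked example (illustrative): with sample_id_all set and a sample_type of
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER, the id header
 * is 2 * sizeof(u32) + sizeof(u64) + sizeof(u64) = 24 bytes.
 */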
1594
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001595bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001596{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001597 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001598
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001599 evlist__for_each_entry_continue(evlist, pos) {
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001600 if (first->attr.sample_id_all != pos->attr.sample_id_all)
1601 return false;
1602 }
1603
1604 return true;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001605}
1606
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001607bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001608{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001609 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001610 return first->attr.sample_id_all;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001611}
Arnaldo Carvalho de Melo81cce8d2011-10-05 19:11:32 -03001612
1613void perf_evlist__set_selected(struct perf_evlist *evlist,
1614 struct perf_evsel *evsel)
1615{
1616 evlist->selected = evsel;
1617}
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001618
Namhyung Kima74b4b62013-03-15 14:48:48 +09001619void perf_evlist__close(struct perf_evlist *evlist)
1620{
1621 struct perf_evsel *evsel;
1622 int ncpus = cpu_map__nr(evlist->cpus);
1623 int nthreads = thread_map__nr(evlist->threads);
1624
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001625 evlist__for_each_entry_reverse(evlist, evsel) {
Arnaldo Carvalho de Melo18ef15c2016-10-03 11:07:24 -03001626 int n = evsel->cpus ? evsel->cpus->nr : ncpus;
Stephane Eranian8ad92192014-01-17 16:34:06 +01001627 perf_evsel__close(evsel, n, nthreads);
1628 }
Namhyung Kima74b4b62013-03-15 14:48:48 +09001629}
1630
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001631static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1632{
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001633 struct cpu_map *cpus;
1634 struct thread_map *threads;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001635 int err = -ENOMEM;
1636
1637 /*
1638 * Try reading /sys/devices/system/cpu/online to get
1639 * an all cpus map.
1640 *
1641 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1642 * code needs an overhaul to properly forward the
1643 * error, and we may not want to do that fallback to a
1644 * default cpu identity map :-\
1645 */
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001646 cpus = cpu_map__new(NULL);
1647 if (!cpus)
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001648 goto out;
1649
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001650 threads = thread_map__new_dummy();
1651 if (!threads)
1652 goto out_put;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001653
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001654	perf_evlist__set_maps(evlist, cpus, threads);
	err = 0;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001655out:
1656 return err;
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001657out_put:
1658 cpu_map__put(cpus);
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001659 goto out;
1660}
1661
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001662int perf_evlist__open(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001663{
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001664 struct perf_evsel *evsel;
Namhyung Kima74b4b62013-03-15 14:48:48 +09001665 int err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001666
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001667 /*
1668	 * Default: one fd per CPU, all threads, aka systemwide,
1669 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1670 */
1671 if (evlist->threads == NULL && evlist->cpus == NULL) {
1672 err = perf_evlist__create_syswide_maps(evlist);
1673 if (err < 0)
1674 goto out_err;
1675 }
1676
Adrian Hunter733cd2f2013-09-06 22:40:11 +03001677 perf_evlist__update_id_pos(evlist);
1678
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001679 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter23df7f72016-01-07 10:13:59 +01001680 err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001681 if (err < 0)
1682 goto out_err;
1683 }
1684
1685 return 0;
1686out_err:
Namhyung Kima74b4b62013-03-15 14:48:48 +09001687 perf_evlist__close(evlist);
Namhyung Kim41c21a62012-02-23 12:13:36 +09001688 errno = -err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001689 return err;
1690}
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001691
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001692int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
Namhyung Kim55e162e2013-03-11 16:43:17 +09001693 const char *argv[], bool pipe_output,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001694 void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001695{
1696 int child_ready_pipe[2], go_pipe[2];
1697 char bf;
1698
1699 if (pipe(child_ready_pipe) < 0) {
1700 perror("failed to create 'ready' pipe");
1701 return -1;
1702 }
1703
1704 if (pipe(go_pipe) < 0) {
1705 perror("failed to create 'go' pipe");
1706 goto out_close_ready_pipe;
1707 }
1708
1709 evlist->workload.pid = fork();
1710 if (evlist->workload.pid < 0) {
1711 perror("failed to fork");
1712 goto out_close_pipes;
1713 }
1714
1715 if (!evlist->workload.pid) {
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001716 int ret;
1717
Namhyung Kim119fa3c2013-03-11 16:43:16 +09001718 if (pipe_output)
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001719 dup2(2, 1);
1720
David Ahern0817df02013-05-25 17:50:39 -06001721 signal(SIGTERM, SIG_DFL);
1722
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001723 close(child_ready_pipe[0]);
1724 close(go_pipe[1]);
1725 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1726
1727 /*
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001728 * Tell the parent we're ready to go
1729 */
1730 close(child_ready_pipe[1]);
1731
1732 /*
1733 * Wait until the parent tells us to go.
1734 */
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001735 ret = read(go_pipe[0], &bf, 1);
1736 /*
1737 * The parent will ask for the execvp() to be performed by
1738		 * writing exactly one byte to workload.cork_fd, usually via
1739 * perf_evlist__start_workload().
1740 *
Arnaldo Carvalho de Melo20f86fc2015-02-03 13:29:05 -03001741 * For cancelling the workload without actually running it,
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001742 * the parent will just close workload.cork_fd, without writing
1743 * anything, i.e. read will return zero and we just exit()
1744 * here.
1745 */
1746 if (ret != 1) {
1747 if (ret == -1)
1748 perror("unable to read pipe");
1749 exit(ret);
1750 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001751
1752 execvp(argv[0], (char **)argv);
1753
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001754 if (exec_error) {
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001755 union sigval val;
1756
1757 val.sival_int = errno;
1758 if (sigqueue(getppid(), SIGUSR1, val))
1759 perror(argv[0]);
1760 } else
1761 perror(argv[0]);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001762 exit(-1);
1763 }
1764
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001765 if (exec_error) {
1766 struct sigaction act = {
1767 .sa_flags = SA_SIGINFO,
1768 .sa_sigaction = exec_error,
1769 };
1770 sigaction(SIGUSR1, &act, NULL);
1771 }
1772
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001773 if (target__none(target)) {
1774 if (evlist->threads == NULL) {
1775			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
1776 __func__, __LINE__);
1777 goto out_close_pipes;
1778 }
Jiri Olsae13798c2015-06-23 00:36:02 +02001779 thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001780 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001781
1782 close(child_ready_pipe[1]);
1783 close(go_pipe[0]);
1784 /*
1785 * wait for child to settle
1786 */
1787 if (read(child_ready_pipe[0], &bf, 1) == -1) {
1788 perror("unable to read pipe");
1789 goto out_close_pipes;
1790 }
1791
Namhyung Kimbcf31452013-06-26 16:14:15 +09001792 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001793 evlist->workload.cork_fd = go_pipe[1];
1794 close(child_ready_pipe[0]);
1795 return 0;
1796
1797out_close_pipes:
1798 close(go_pipe[0]);
1799 close(go_pipe[1]);
1800out_close_ready_pipe:
1801 close(child_ready_pipe[0]);
1802 close(child_ready_pipe[1]);
1803 return -1;
1804}
1805
1806int perf_evlist__start_workload(struct perf_evlist *evlist)
1807{
1808 if (evlist->workload.cork_fd > 0) {
David Ahernb3824402013-07-02 13:27:21 -06001809 char bf = 0;
Namhyung Kimbcf31452013-06-26 16:14:15 +09001810 int ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001811 /*
1812 * Remove the cork, let it rip!
1813 */
Namhyung Kimbcf31452013-06-26 16:14:15 +09001814 ret = write(evlist->workload.cork_fd, &bf, 1);
1815 if (ret < 0)
Soramichi Akiyamae978be92017-01-10 10:41:00 -03001816 perror("unable to write to pipe");
Namhyung Kimbcf31452013-06-26 16:14:15 +09001817
1818 close(evlist->workload.cork_fd);
1819 return ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001820 }
1821
1822 return 0;
1823}
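/*
 * Usage sketch (illustrative): the fork/cork handshake above is normally
 * driven as follows: prepare the stopped workload first, set up the
 * counters, then uncork it so that execvp() runs with events enabled.
 */
static int __maybe_unused workload_usage_sketch(struct perf_evlist *evlist,
						struct target *target,
						const char *argv[])
{
	if (perf_evlist__prepare_workload(evlist, target, argv, false, NULL))
		return -1;

	if (perf_evlist__open(evlist) < 0)
		return -1;

	return perf_evlist__start_workload(evlist);
}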
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001824
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -03001825int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001826 struct perf_sample *sample)
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001827{
Adrian Hunter75562572013-08-27 11:23:09 +03001828 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1829
1830 if (!evsel)
1831 return -EFAULT;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001832 return perf_evsel__parse_sample(evsel, event, sample);
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001833}
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001834
1835size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1836{
1837 struct perf_evsel *evsel;
1838 size_t printed = 0;
1839
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001840 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001841 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1842 perf_evsel__name(evsel));
1843 }
1844
Davidlohr Buesob2222132013-11-12 22:24:24 -08001845 return printed + fprintf(fp, "\n");
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001846}
Arnaldo Carvalho de Melo6ef068c2013-10-17 12:07:58 -03001847
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001848int perf_evlist__strerror_open(struct perf_evlist *evlist,
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001849 int err, char *buf, size_t size)
1850{
1851 int printed, value;
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001852 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001853
1854 switch (err) {
1855 case EACCES:
1856 case EPERM:
1857 printed = scnprintf(buf, size,
1858 "Error:\t%s.\n"
1859 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1860
Adrian Hunter1a472452013-12-11 14:36:23 +02001861 value = perf_event_paranoid();
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001862
1863 printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1864
1865 if (value >= 2) {
1866 printed += scnprintf(buf + printed, size - printed,
1867 "For your workloads it needs to be <= 1\nHint:\t");
1868 }
1869 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001870 "For system wide tracing it needs to be set to -1.\n");
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001871
1872 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001873 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1874 "Hint:\tThe current value is %d.", value);
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001875 break;
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001876 case EINVAL: {
1877 struct perf_evsel *first = perf_evlist__first(evlist);
1878 int max_freq;
1879
1880 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1881 goto out_default;
1882
1883 if (first->attr.sample_freq < (u64)max_freq)
1884 goto out_default;
1885
1886 printed = scnprintf(buf, size,
1887 "Error:\t%s.\n"
1888 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1889 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1890 emsg, max_freq, first->attr.sample_freq);
1891 break;
1892 }
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001893 default:
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001894out_default:
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001895 scnprintf(buf, size, "%s", emsg);
1896 break;
1897 }
1898
1899 return 0;
1900}
Adrian Huntera025e4f2013-12-11 14:36:35 +02001901
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001902int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1903{
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001904 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001905	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user = 0, printed = 0;
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001906
1907 switch (err) {
1908 case EPERM:
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001909 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001910 printed += scnprintf(buf + printed, size - printed,
1911 "Error:\t%s.\n"
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001912 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001913 "Hint:\tTried using %zd kB.\n",
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001914 emsg, pages_max_per_user, pages_attempted);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001915
1916 if (pages_attempted >= pages_max_per_user) {
1917 printed += scnprintf(buf + printed, size - printed,
1918 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1919 pages_max_per_user + pages_attempted);
1920 }
1921
1922 printed += scnprintf(buf + printed, size - printed,
1923 "Hint:\tTry using a smaller -m/--mmap-pages value.");
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001924 break;
1925 default:
1926 scnprintf(buf, size, "%s", emsg);
1927 break;
1928 }
1929
1930 return 0;
1931}
1932
Adrian Huntera025e4f2013-12-11 14:36:35 +02001933void perf_evlist__to_front(struct perf_evlist *evlist,
1934 struct perf_evsel *move_evsel)
1935{
1936 struct perf_evsel *evsel, *n;
1937 LIST_HEAD(move);
1938
1939 if (move_evsel == perf_evlist__first(evlist))
1940 return;
1941
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001942 evlist__for_each_entry_safe(evlist, n, evsel) {
Adrian Huntera025e4f2013-12-11 14:36:35 +02001943 if (evsel->leader == move_evsel->leader)
1944 list_move_tail(&evsel->node, &move);
1945 }
1946
1947 list_splice(&move, &evlist->entries);
1948}
Adrian Hunter60b08962014-07-31 09:00:52 +03001949
1950void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1951 struct perf_evsel *tracking_evsel)
1952{
1953 struct perf_evsel *evsel;
1954
1955 if (tracking_evsel->tracking)
1956 return;
1957
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001958 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter60b08962014-07-31 09:00:52 +03001959 if (evsel != tracking_evsel)
1960 evsel->tracking = false;
1961 }
1962
1963 tracking_evsel->tracking = true;
1964}
Wang Nan7630b3e2016-02-22 09:10:33 +00001965
1966struct perf_evsel *
1967perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1968 const char *str)
1969{
1970 struct perf_evsel *evsel;
1971
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001972 evlist__for_each_entry(evlist, evsel) {
Wang Nan7630b3e2016-02-22 09:10:33 +00001973 if (!evsel->name)
1974 continue;
1975 if (strcmp(str, evsel->name) == 0)
1976 return evsel;
1977 }
1978
1979 return NULL;
1980}
Wang Nan54cc54d2016-07-14 08:34:42 +00001981
1982void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1983 enum bkw_mmap_state state)
1984{
1985 enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1986 enum action {
1987 NONE,
1988 PAUSE,
1989 RESUME,
1990 } action = NONE;
1991
1992 if (!evlist->backward_mmap)
1993 return;
1994
1995 switch (old_state) {
1996 case BKW_MMAP_NOTREADY: {
1997 if (state != BKW_MMAP_RUNNING)
1998			goto state_err;
1999 break;
2000 }
2001 case BKW_MMAP_RUNNING: {
2002 if (state != BKW_MMAP_DATA_PENDING)
2003 goto state_err;
2004 action = PAUSE;
2005 break;
2006 }
2007 case BKW_MMAP_DATA_PENDING: {
2008 if (state != BKW_MMAP_EMPTY)
2009 goto state_err;
2010 break;
2011 }
2012 case BKW_MMAP_EMPTY: {
2013 if (state != BKW_MMAP_RUNNING)
2014 goto state_err;
2015 action = RESUME;
2016 break;
2017 }
2018 default:
2019		WARN_ONCE(1, "Shouldn't get here\n");
2020 }
2021
2022 evlist->bkw_mmap_state = state;
2023
2024 switch (action) {
2025 case PAUSE:
2026 perf_evlist__pause(evlist);
2027 break;
2028 case RESUME:
2029 perf_evlist__resume(evlist);
2030 break;
2031 case NONE:
2032 default:
2033 break;
2034 }
2035
2036state_err:
2037 return;
2038}
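/*
 * Summary of the legal transitions handled above (illustrative):
 *
 *	NOTREADY     -> RUNNING		(no ring buffer action)
 *	RUNNING      -> DATA_PENDING	(pause the backward ring buffers)
 *	DATA_PENDING -> EMPTY		(no ring buffer action)
 *	EMPTY        -> RUNNING		(resume the backward ring buffers)
 *
 * Any other request is rejected and leaves bkw_mmap_state unchanged.
 */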