// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/list.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>

12void perf_evlist__init(struct perf_evlist *evlist)
13{
14 INIT_LIST_HEAD(&evlist->entries);
Jiri Olsa6484d2f2019-07-21 13:24:28 +020015 evlist->nr_entries = 0;
Jiri Olsa4562a732019-07-21 13:24:25 +020016}
Jiri Olsa9a5edde2019-07-21 13:24:26 +020017
/*
 * Propagate the evlist's cpu and thread maps down into one evsel.
 *
 * cpus: the evsel inherits the evlist-wide map unless it carries its
 * own PMU-provided map (own_cpus) AND the user did not pin cpus on the
 * command line (has_user_cpus); in that case the PMU map wins.
 *
 * threads: always inherited from the evlist.
 *
 * Reference counting: the evsel's previous map is put before a new
 * reference is taken, so repeated propagation does not leak.
 */
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		/* evsel should be using its own PMU map but isn't yet. */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	perf_thread_map__put(evsel->threads);
	evsel->threads = perf_thread_map__get(evlist->threads);
}
36
37static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
38{
39 struct perf_evsel *evsel;
40
41 perf_evlist__for_each_evsel(evlist, evsel)
42 __perf_evlist__propagate_maps(evlist, evsel);
43}
44
Jiri Olsa9a5edde2019-07-21 13:24:26 +020045void perf_evlist__add(struct perf_evlist *evlist,
46 struct perf_evsel *evsel)
47{
48 list_add_tail(&evsel->node, &evlist->entries);
Jiri Olsa6484d2f2019-07-21 13:24:28 +020049 evlist->nr_entries += 1;
Jiri Olsa453fa032019-07-21 13:24:43 +020050 __perf_evlist__propagate_maps(evlist, evsel);
Jiri Olsa9a5edde2019-07-21 13:24:26 +020051}
Jiri Olsa52e22fb2019-07-21 13:24:27 +020052
53void perf_evlist__remove(struct perf_evlist *evlist,
54 struct perf_evsel *evsel)
55{
56 list_del_init(&evsel->node);
Jiri Olsa6484d2f2019-07-21 13:24:28 +020057 evlist->nr_entries -= 1;
Jiri Olsa52e22fb2019-07-21 13:24:27 +020058}
Jiri Olsa634912d2019-07-21 13:24:32 +020059
60struct perf_evlist *perf_evlist__new(void)
61{
62 struct perf_evlist *evlist = zalloc(sizeof(*evlist));
63
64 if (evlist != NULL)
65 perf_evlist__init(evlist);
66
67 return evlist;
68}
Jiri Olsa651bf382019-07-21 13:24:34 +020069
70struct perf_evsel *
71perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
72{
73 struct perf_evsel *next;
74
75 if (!prev) {
76 next = list_first_entry(&evlist->entries,
77 struct perf_evsel,
78 node);
79 } else {
80 next = list_next_entry(prev, node);
81 }
82
83 /* Empty list is noticed here so don't need checking on entry. */
84 if (&next->node == &evlist->entries)
85 return NULL;
86
87 return next;
88}
Jiri Olsa57f0c3b2019-07-21 13:24:35 +020089
90void perf_evlist__delete(struct perf_evlist *evlist)
91{
92 free(evlist);
93}
Jiri Olsa453fa032019-07-21 13:24:43 +020094
/*
 * Install evlist-wide cpu and thread maps, then propagate them to
 * every evsel already on the list. The evlist takes (a reference to)
 * each map; identical pointers are left untouched so callers can
 * update just one of the two.
 */
void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it. Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1. If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		perf_cpu_map__put(evlist->cpus);
		evlist->cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}
Jiri Olsa80dc2b32019-07-21 13:24:55 +0200118
119int perf_evlist__open(struct perf_evlist *evlist)
120{
121 struct perf_evsel *evsel;
122 int err;
123
124 perf_evlist__for_each_entry(evlist, evsel) {
125 err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
126 if (err < 0)
127 goto out_err;
128 }
129
130 return 0;
131
132out_err:
133 perf_evlist__close(evlist);
134 return err;
135}
136
137void perf_evlist__close(struct perf_evlist *evlist)
138{
139 struct perf_evsel *evsel;
140
141 perf_evlist__for_each_entry_reverse(evlist, evsel)
142 perf_evsel__close(evsel);
143}
Jiri Olsafcc97c32019-07-21 13:24:56 +0200144
145void perf_evlist__enable(struct perf_evlist *evlist)
146{
147 struct perf_evsel *evsel;
148
149 perf_evlist__for_each_entry(evlist, evsel)
150 perf_evsel__enable(evsel);
151}
152
153void perf_evlist__disable(struct perf_evlist *evlist)
154{
155 struct perf_evsel *evsel;
156
157 perf_evlist__for_each_entry(evlist, evsel)
158 perf_evsel__disable(evsel);
159}