// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>

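/*
 * Illustrative use of this evlist API from a libperf client -- a sketch only,
 * not part of this file. It assumes a populated struct perf_event_attr attr;
 * perf_evsel__new() and the cpu/thread map constructors live in the companion
 * evsel.c, cpumap.c and threadmap.c sources:
 *
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 *	struct perf_evlist *evlist = perf_evlist__new();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	perf_evlist__add(evlist, evsel);
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *
 *	if (!perf_evlist__open(evlist)) {
 *		perf_evlist__enable(evlist);
 *		...
 *		perf_evlist__disable(evlist);
 *		perf_evlist__close(evlist);
 *	}
 *
 *	perf_evlist__delete(evlist);
 *	perf_cpu_map__put(cpus);
 *	perf_thread_map__put(threads);
 */
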
void perf_evlist__init(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
}

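/*
 * Propagate the evlist's cpu and thread maps to a single evsel, dropping
 * the references the evsel currently holds. The evsel keeps the cpu map it
 * got on its own (e.g. from PMU sysfs) unless the user asked for an
 * explicit cpu list on the evlist.
 */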
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	perf_thread_map__put(evsel->threads);
	evsel->threads = perf_thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

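/*
 * Append the evsel to the evlist and immediately propagate the evlist's
 * current cpu/thread maps to it.
 */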
void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;
	__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}

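/*
 * Note that this only frees the evlist container itself: evsels still on
 * the list and the evlist's cpu/thread maps are not released here.
 */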
void perf_evlist__delete(struct perf_evlist *evlist)
{
	free(evlist);
}

void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it. Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1. If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		perf_cpu_map__put(evlist->cpus);
		evlist->cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}

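/*
 * Open every evsel on its propagated cpu/thread maps. On the first failure,
 * all evsels opened so far are closed again and the error is returned.
 */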
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

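/*
 * Sample ID handling: each (evsel, cpu, thread) slot of the evsel's
 * sample_id xyarray carries a struct perf_sample_id. Hashing the
 * kernel-assigned sample ID into evlist->heads[] lets an ID found in a
 * sample be mapped back to the evsel it belongs to.
 */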
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}