// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>

void perf_evlist__init(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs), so keep
	 * them if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	perf_thread_map__put(evsel->threads);
	evsel->threads = perf_thread_map__get(evlist->threads);
	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}
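
/*
 * Illustration (not exercised by this file): an uncore PMU event typically
 * comes with its own cpu map from sysfs (evsel->own_cpus, e.g. one CPU per
 * socket) and keeps it, while a plain hardware event inherits evlist->cpus.
 * Only when the user pinned the session to explicit CPUs (has_user_cpus)
 * does the evlist cpu map override the PMU-provided one.
 */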

void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;
	__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* Empty list is noticed here, so there's no need to check on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}
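
/*
 * perf_evlist__next() is the building block for the public
 * perf_evlist__for_each_evsel() iterator in perf/evlist.h, e.g.:
 *
 *	struct perf_evsel *evsel;
 *
 *	perf_evlist__for_each_evsel(evlist, evsel)
 *		perf_evsel__enable(evsel);
 */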

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or the other of the maps isn't
	 * being changed, i.e. don't put it. Note we are assuming the maps
	 * that are being applied are brand new and evlist is taking
	 * ownership of the original reference count of 1. If that is not
	 * the case it is up to the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		perf_cpu_map__put(evlist->cpus);
		evlist->cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	if (!evlist->all_cpus && cpus)
		evlist->all_cpus = perf_cpu_map__get(cpus);

	perf_evlist__propagate_maps(evlist);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}
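
/*
 * A minimal counting flow built on the calls above might look like the
 * sketch below (modelled on the libperf counting example; error handling
 * omitted, and the exact attr setup is up to the caller):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_evlist *evlist = perf_evlist__new();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *	struct perf_counts_values counts = { .val = 0 };
 *
 *	perf_thread_map__set_pid(threads, 0, 0);	// current process
 *	perf_evlist__add(evlist, evsel);
 *	perf_evlist__set_maps(evlist, NULL, threads);
 *
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	// ... workload ...
 *	perf_evlist__disable(evlist);
 *
 *	perf_evlist__for_each_evsel(evlist, evsel)
 *		perf_evsel__read(evsel, 0, 0, &counts);
 *
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 *	perf_thread_map__put(threads);
 */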

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id... All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
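
/*
 * For reference, the fallback above walks the non-group read layout
 * described in perf_event_open(2):
 *
 *	read_data[0]  value
 *	read_data[1]  time_enabled   (if PERF_FORMAT_TOTAL_TIME_ENABLED)
 *	read_data[2]  time_running   (if PERF_FORMAT_TOTAL_TIME_RUNNING)
 *	read_data[n]  id             (if PERF_FORMAT_ID, hence id_idx above)
 */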

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}
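
/*
 * E.g. with 4 CPUs and 3 monitored threads, a regular evsel contributes
 * 4 * 3 = 12 potential fds while a system_wide one contributes 4 (one per
 * CPU), so an evlist with one of each sizes pollfd for 16 entries.
 */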

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = perf_thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, int cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite)
{
	int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		if (!evsel->system_wide &&
		    perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int thread;
	int nr_threads = perf_thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, thread, false);

		if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
				   &output, &output_overwrite))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
	int cpu, thread;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		if (ops->idx)
			ops->idx(evlist, mp, cpu, true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	nr_mmaps = perf_cpu_map__nr(evlist->cpus);
	if (perf_cpu_map__empty(evlist->cpus))
		nr_mmaps = perf_thread_map__nr(evlist->threads);

	return nr_mmaps;
}

int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	struct perf_evsel *evsel;
	const struct perf_cpu_map *cpus = evlist->cpus;
	const struct perf_thread_map *threads = evlist->threads;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}
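
/*
 * A sampling consumer would typically pair perf_evlist__mmap() and
 * perf_evlist__next_mmap() with the perf_mmap read API roughly like this
 * (sketch, based on the libperf sampling example; error handling omitted):
 *
 *	struct perf_mmap *map;
 *	union perf_event *event;
 *
 *	perf_evlist__mmap(evlist, 4);
 *	perf_evlist__enable(evlist);
 *	// ... workload ...
 *	perf_evlist__disable(evlist);
 *
 *	perf_evlist__for_each_mmap(evlist, map, false) {
 *		if (perf_mmap__read_init(map) < 0)
 *			continue;
 *
 *		while ((event = perf_mmap__read_event(map)) != NULL) {
 *			// process event->header.type and payload here
 *			perf_mmap__consume(map);
 *		}
 *
 *		perf_mmap__read_done(map);
 *	}
 *
 *	perf_evlist__munmap(evlist);
 */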