// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c and builtin-{top,stat,record}.c; see those files
 * for further copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}
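
/*
 * Worked example, assuming 4KiB pages and 16 data pages: map->mask is
 * 16 * 4096 - 1 = 0xffff, so the mapping length is the 64KiB data area
 * (mask + 1) plus one leading control page that holds the
 * struct perf_event_mmap_page header.
 */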

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}
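
/*
 * Worked example of the straddle case above, assuming mask = 0xffff: a
 * 0x30-byte event starting at offset 0xfff0 wraps, so the copy loop
 * first takes 0x10 bytes from the tail of the data area and then the
 * remaining 0x20 bytes from offset 0x0, assembling the whole event
 * contiguously in map->event_copy.
 */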

/*
 * Read events from the ring buffer one by one.
 * Each call returns at most one event.
 *
 * Usage:
 * perf_mmap__read_init()
 * while (event = perf_mmap__read_event()) {
 *	// process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 *
 * A fuller sketch follows the function below.
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite mode doesn't pause the ring buffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
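
/*
 * A fuller version of the usage sketch above, with error handling; a
 * minimal sketch only, assuming the map is already mmapped and that
 * process_event() stands in for the caller's own handler:
 *
 *	union perf_event *event;
 *	int rc = perf_mmap__read_init(map);
 *
 *	if (rc < 0)
 *		return rc == -EAGAIN ? 0 : rc; // -EAGAIN: not enough data yet
 *
 *	while ((event = perf_mmap__read_event(map)) != NULL) {
 *		process_event(event);
 *		perf_mmap__consume(map);
 *	}
 *	perf_mmap__read_done(map);
 */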

static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}
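
/*
 * E.g. if a non-overwrite reader has consumed everything up to
 * map->prev == 0x3000, the tail write above publishes 0x3000 to the
 * kernel, which may then reuse the ring-buffer space before that point
 * for new events.
 */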

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct perf_mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long node_mask;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = perf_mmap__mmap_len(map);
		node_mask = 1UL << cpu__get_node(cpu);
		if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
			       data, data + mmap_len, cpu__get_node(cpu));
			return -1;
		}
	}

	return 0;
}
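
/*
 * Worked example: if 'cpu' sits on NUMA node 1, node_mask above becomes
 * 1UL << 1 == 0x2, and mbind() asks the kernel to keep the AIO buffer's
 * pages on node 1 (MPOL_BIND), close to where the reading thread runs.
 */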
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
{
	map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused,
			       int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif

static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use a cblock.aio_fildes value different from -1
			 * to denote a started aio write operation on the
			 * cblock, so an explicit record__aio_sync() call
			 * is required before the cblock may be reused.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with a priority delta to get
			 * faster aio write system calls: queued requests
			 * are kept in separate per-prio queues, so adding
			 * a new request iterates through a shorter
			 * per-prio list. Blocks with numbers higher than
			 * _SC_AIO_PRIO_DELTA_MAX go with priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}
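
/*
 * Worked example of the priority scheme above: with
 * sysconf(_SC_AIO_PRIO_DELTA_MAX) == 20 and nr_cblocks == 4, the four
 * cblocks get aio_reqprio 20, 19, 18 and 17, each landing in its own
 * per-prio queue.  A hedged sketch of how a writer then uses a cblock
 * (the real helpers live in builtin-record.c; trace_fd, size and off
 * are the caller's own):
 *
 *	struct aiocb *cb = &map->aio.cblocks[i];
 *
 *	if (cb->aio_fildes == -1) {		// free, not in flight
 *		cb->aio_fildes = trace_fd;
 *		cb->aio_buf    = map->aio.data[i];
 *		cb->aio_nbytes = size;
 *		cb->aio_offset = off;
 *		aio_write(cb);			// busy until synced back to -1
 *	}
 */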

static void perf_mmap__aio_munmap(struct perf_mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct perf_mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
{
}
#endif

void perf_mmap__munmap(struct perf_mmap *map)
{
	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, perf_mmap__mmap_len(map));
		map->data = NULL;
	}
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void build_node_mask(int node, cpu_set_t *mask)
{
	int c, cpu, nr_cpus;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			CPU_SET(cpu, mask);
	}
}

static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
{
	CPU_ZERO(&map->affinity_mask);
	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		CPU_SET(map->cpu, &map->affinity_mask);
}
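
/*
 * Sketch of the intended use of the mask built above (the real call
 * sites are in builtin-record.c): before draining a map, the tool can
 * migrate itself onto the CPUs near the buffer's memory, e.g.
 *
 *	sched_setaffinity(0, sizeof(map->affinity_mask), &map->affinity_mask);
 */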

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	/*
	 * The last refcount drop happens at perf_mmap__consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;
	map->cpu = cpu;

	perf_mmap__setup_affinity_mask(map, mp);

	map->flush = mp->flush;

	map->comp_level = mp->comp_level;

	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}
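
/*
 * A minimal sketch of a caller setting this up, assuming 2^7 data pages
 * and defaults elsewhere (evlist.c is the real call site; the field
 * values here are illustrative only):
 *
 *	struct mmap_params mp = {
 *		.mask  = 128 * page_size - 1,
 *		.prot  = PROT_READ | PROT_WRITE,
 *		.flush = 1,
 *	};
 *
 *	if (perf_mmap__mmap(map, &mp, fd, cpu) < 0)
 *		return -1;
 */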

static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));
	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}
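
/*
 * Worked example: with mask + 1 == 0x10000 and *start == 0x1a0, the scan
 * hops from header to header via pheader->size; reaching a zero-sized
 * header at, say, 0x4c8 means the buffer never wrapped, so *end = 0x4c8.
 * If instead the hops cover the full 0x10000 bytes, the buffer wrapped
 * and the scan rewinds, backing off one event if it overshot a full pass.
 */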

/*
 * Report the start and end of the available data in the ring buffer
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if ((md->end - md->start) < md->flush)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}
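
/*
 * E.g. in non-overwrite mode with prev == 0x100 and head == 0x480, this
 * yields start = 0x100 and end = 0x480; in overwrite (backward) mode the
 * roles are swapped (start = head, end = old), since reading proceeds
 * from the head back toward the previous position.
 */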

int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}

int perf_mmap__push(struct perf_mmap *md, void *to,
		    int push(struct perf_mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->end - md->start;

	if ((md->start & md->mask) + size != (md->end & md->mask)) {
		buf = &data[md->start & md->mask];
		size = md->mask + 1 - (md->start & md->mask);
		md->start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->start & md->mask];
	size = md->end - md->start;
	md->start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md);
out:
	return rc;
}
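
/*
 * Worked example of the split above, assuming mask == 0xffff,
 * start == 0xff00 and end == 0x10100: the data wraps, so push() is
 * called twice, first with the 0x100 bytes at offset 0xff00 (up to the
 * end of the data area) and then with the remaining 0x100 bytes from
 * offset 0x0.
 */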

/*
 * Mandatory for overwrite mode.
 * Reading in overwrite mode proceeds backward, and the last
 * perf_mmap__read() leaves map->prev at the tail; correct map->prev to
 * the head, which is the end of the next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}