// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */

size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}

/* When check_messup is true, 'end' must point to a valid entry. */
static union perf_event *perf_mmap__read(struct mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->core.base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->core.mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->core.event_copy;

			do {
				cpy = min(map->core.mask + 1 - (offset & map->core.mask), len);
				memcpy(dst, &data[offset & map->core.mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->core.event_copy;
		}

		*startp += size;
	}

	return event;
}

/*
 * Read events from the ring buffer one by one.
 * Return one event per call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->core.refcnt))
		return NULL;

	/* non-overwrite mode doesn't pause the ringbuffer */
	if (!map->core.overwrite)
		map->core.end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->core.start, map->core.end);

	if (!map->core.overwrite)
		map->core.prev = map->core.start;

	return event;
}

static bool perf_mmap__empty(struct mmap *map)
{
	return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base;
}

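/*
 * Reference counting: mmap__mmap() starts each map at a refcount of 2, and
 * perf_mmap__put() unmaps the ring buffer once the count drops to zero.
 */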
void perf_mmap__get(struct mmap *map)
{
	refcount_inc(&map->core.refcnt);
}

void perf_mmap__put(struct mmap *map)
{
	BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0);

	if (refcount_dec_and_test(&map->core.refcnt))
		perf_mmap__munmap(map);
}

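/*
 * In non-overwrite mode, publish the new tail so the kernel can reuse the
 * space that has been read; drop the reference held for consumption once the
 * map is empty and only that final reference remains.
 */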
void perf_mmap__consume(struct mmap *map)
{
	if (!map->core.overwrite) {
		u64 old = map->core.prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

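/*
 * Bind the AIO data buffer to the NUMA node of the CPU this map belongs to,
 * unless the affinity mode is PERF_AFFINITY_SYS or the system has a single
 * node. The !HAVE_LIBNUMA_SUPPORT variants below fall back to plain
 * malloc()/zfree() and skip the binding.
 */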
static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long node_mask;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_mask = 1UL << cpu__get_node(cpu);
		if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
				data, data + mmap_len, cpu__get_node(cpu));
			return -1;
		}
	}

	return 0;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
			       int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif

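/*
 * Allocate nr_cblocks AIO control blocks and data buffers for this map so
 * that ring buffer contents can be written out asynchronously.
 */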
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * An aio_fildes value different from -1 denotes a
			 * started aio write operation on the cblock, which
			 * then requires an explicit record__aio_sync() call
			 * before the cblock may be reused again.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with priority delta to have
			 * faster aio write system calls, because queued
			 * requests are kept in separate per-prio queues and
			 * adding a new request iterates through a shorter
			 * per-prio list. Blocks with numbers higher than
			 * _SC_AIO_PRIO_DELTA_MAX go with priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif

void perf_mmap__munmap(struct mmap *map)
{
	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	if (map->core.base != NULL) {
		munmap(map->core.base, mmap__mmap_len(map));
		map->core.base = NULL;
		map->core.fd = -1;
		refcount_set(&map->core.refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

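/* Collect all online CPUs that belong to the given NUMA node into 'mask'. */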
static void build_node_mask(int node, cpu_set_t *mask)
{
	int c, cpu, nr_cpus;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			CPU_SET(cpu, mask);
	}
}

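/*
 * Build the affinity mask for this map: all CPUs of the map's NUMA node for
 * PERF_AFFINITY_NODE (on multi-node systems), only the map's own CPU for
 * PERF_AFFINITY_CPU, and an empty mask otherwise.
 */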
static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
	CPU_ZERO(&map->affinity_mask);
	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		CPU_SET(map->core.cpu, &map->affinity_mask);
}

int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	/*
	 * The last one will be done at perf_mmap__consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->core.refcnt, 2);

	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	perf_mmap__setup_affinity_mask(map, mp);

	map->core.flush = mp->flush;

	map->comp_level = mp->comp_level;

	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}

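/*
 * Walk the events of a full backward (overwrite) ring buffer, starting at
 * 'start', to find where the readable data ends: either after covering one
 * full buffer length or at the first zero-sized header.
 */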
static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));
	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

/*
 * Report the start and end of the available data in the ring buffer.
 */
static int __perf_mmap__read_init(struct mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->core.prev;
	unsigned char *data = md->core.base + page_size;
	unsigned long size;

	md->core.start = md->core.overwrite ? head : old;
	md->core.end = md->core.overwrite ? old : head;

	if ((md->core.end - md->core.start) < md->core.flush)
		return -EAGAIN;

	size = md->core.end - md->core.start;
	if (size > (unsigned long)(md->core.mask) + 1) {
		if (!md->core.overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->core.prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end))
			return -EINVAL;
	}

	return 0;
}

int perf_mmap__read_init(struct mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->core.refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}

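/*
 * Drain the available data and hand it to 'push' for writing out, splitting
 * it into two chunks when the region wraps around the end of the ring buffer.
 */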
int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(md);
out:
	return rc;
}

/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set tail to map->core.prev.
 * Need to correct the map->core.prev to head, which is the end of the next read.
 */
void perf_mmap__read_done(struct mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->core.refcnt))
		return;

	map->core.prev = perf_mmap__read_head(map);
}