// SPDX-License-Identifier: GPL-2.0
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <errno.h>
#include <linux/ring_buffer.h>
#include <linux/perf_event.h>
#include <perf/mmap.h>
#include <internal/mmap.h>
#include <internal/lib.h>
#include <linux/kernel.h>
#include "internal.h"

void perf_mmap__init(struct perf_mmap *map, bool overwrite,
		     libperf_unmap_cb_t unmap_cb)
{
	map->fd = -1;
	map->overwrite = overwrite;
	map->unmap_cb = unmap_cb;
	refcount_set(&map->refcnt, 0);
}

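/*
 * Total mmap size: the data area is mask + 1 bytes, preceded by one
 * page holding struct perf_event_mmap_page (the control/header page).
 */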
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
		    int fd, int cpu)
{
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		map->base = NULL;
		return -1;
	}

	map->fd = fd;
	map->cpu = cpu;
	return 0;
}
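
/*
 * Illustrative only, not part of libperf: a caller would typically size
 * the data area in pages, fill in a perf_mmap_param and hand over the
 * perf_event fd.  Only ->prot and ->mask are used above; everything
 * else in this sketch is an assumption.
 *
 *	struct perf_mmap map;
 *	struct perf_mmap_param mp = {
 *		.prot = PROT_READ | PROT_WRITE,
 *		.mask = nr_pages * page_size - 1,	// nr_pages: power of two
 *	};
 *
 *	perf_mmap__init(&map, false, NULL);
 *	if (perf_mmap__mmap(&map, &mp, fd, cpu))
 *		return -1;	// mmap(2) failed
 */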

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map && map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	if (map && map->unmap_cb)
		map->unmap_cb(map);
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}
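
/*
 * Reference counting sketch, as far as it can be read from this file
 * (the initial reference is taken by whoever owns the map, e.g. the
 * evlist mmap code; that part lives outside this file and is an
 * assumption here):
 *
 *	perf_mmap__init()	refcnt starts at 0
 *	owner sets refcnt	e.g. refcount_set(&map->refcnt, 1)
 *	perf_mmap__get()	each additional user takes a reference
 *	perf_mmap__put()	drops one; the last put unmaps the ring
 *				via perf_mmap__munmap()
 */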

static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	ring_buffer_write_tail(md->base, tail);
}

u64 perf_mmap__read_head(struct perf_mmap *map)
{
	return ring_buffer_read_head(map->base);
}

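/*
 * The ring is considered empty once the consumer position (map->prev)
 * has caught up with the kernel's head and no AUX area is attached.
 */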
static bool perf_mmap__empty(struct perf_mmap *map)
{
	struct perf_event_mmap_page *pc = map->base;

	return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
}

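/*
 * Mark everything up to map->prev as consumed: for a normal
 * (non-overwrite) ring this publishes the new tail so the kernel may
 * reuse that space.  Once the ring is empty and only one reference
 * remains, drop it, which unmaps the ring via perf_mmap__put().
 */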
void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

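/*
 * The backward (overwrite) ring may have wrapped, so the oldest records
 * can be partially clobbered.  Starting at *start, walk the event
 * headers forward until either a zero-sized header is found (end of
 * valid data) or one full buffer size has been covered (in which case
 * the record that crossed the boundary is dropped), and report the
 * resulting position in *end.
 */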
static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));
	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

/*
 * Report the start and end of the available data in the ring buffer.
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if ((md->end - md->start) < md->flush)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * The backward ring buffer is full. We still have a chance to
		 * read most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}

int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}
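
/*
 * Illustrative read cycle as seen from a caller, assuming records are
 * parsed straight out of the data pages.  process_record() is
 * hypothetical, and wrap-around of a record across the buffer end is
 * not handled here:
 *
 *	if (perf_mmap__read_init(map) < 0)
 *		return;		// -EAGAIN: nothing to do, -ENOENT: gone
 *
 *	while (map->start != map->end) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = (void *)(map->base + page_size + (map->start & map->mask));
 *		process_record(hdr);
 *		map->start += hdr->size;
 *	}
 *
 *	map->prev = map->start;
 *	perf_mmap__consume(map);	// publish the new tail position
 */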