// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

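/*
 * Insert @new into the time-ordered 'events' list. The walk starts at
 * the most recently queued event (oe->last) and moves forwards or
 * backwards, since consecutive events tend to have nearby timestamps.
 */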
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * The last queued event might point to some random place in the
	 * list. We expect the new event's timestamp to be close to it,
	 * so start searching from there.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

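/*
 * Duplicate @event so it can outlive the mmapped ring buffer. The copy
 * is charged against max_alloc_size; NULL is returned once the limit is
 * reached or when memdup() fails.
 */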
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

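/* Copy the event only when the queue was set up with copy_on_queue. */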
static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

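/*
 * Release a duplicated event and return its size to the allocation
 * accounting; free_dup_event() is a no-op unless copy_on_queue is set.
 */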
static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (event) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (oe->copy_on_queue)
		__free_dup_event(oe, event);
}

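/* Size allocation buffers to hold roughly 64KB worth of ordered events each. */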
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;
	size_t size;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	/*
	 * We maintain the following scheme of buffers for ordered
	 * event allocation:
	 *
	 *   to_free list -> buffer1 (64K)
	 *                   buffer2 (64K)
	 *                   ...
	 *
	 * Each buffer keeps an array of ordered event objects:
	 *   buffer -> event[0]
	 *             event[1]
	 *             ...
	 *
	 * Each allocated ordered event is linked to one of the
	 * following lists:
	 *   - time ordered list 'events'
	 *   - list of currently removed events 'cache'
	 *
	 * Allocation of the ordered event uses the following order
	 * to get the memory:
	 *   - use recently removed object from 'cache' list
	 *   - use available object in current allocation buffer
	 *   - allocate new buffer if the current buffer is full
	 *
	 * Removal of an ordered event object moves it from 'events'
	 * to the 'cache' list.
	 */
	size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = &oe->buffer->event[oe->buffer_idx];
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		oe->buffer_idx = 1;
		new = &oe->buffer->event[0];
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		return NULL;
	}

	new->event = new_event;
	return new;
}

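/*
 * Allocate an ordered event for @event (copying it when copy_on_queue
 * is set), stamp it with @timestamp and insert it into the sorted queue.
 */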
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
			  union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

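/*
 * Remove @event from the 'events' list and park it on the 'cache' list
 * for reuse, dropping its duplicated perf event if there is one.
 */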
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
	event->event = NULL;
}

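/*
 * Queue @event for ordered delivery. Events with no usable timestamp are
 * rejected with -ETIME; timestamps older than the last flush are still
 * queued but counted as unordered. When allocation fails, half of the
 * queue is flushed to reclaim memory and the allocation is retried once.
 */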
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  u64 timestamp, u64 file_offset)
{
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp, "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}

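/*
 * Deliver and delete every queued event with a timestamp up to
 * oe->next_flush, advancing oe->last_flush along the way.
 */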
static int do_flush(struct ordered_events *oe, bool show_progress)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}

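/*
 * Set the flush limit (oe->next_flush) according to the flush type and
 * run do_flush(): FINAL and TOP flush everything, HALF flushes up to the
 * midpoint between the oldest and newest queued timestamps, TIME flushes
 * up to @timestamp, and ROUND reuses the previously recorded limit and
 * then advances it to max_timestamp.
 */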
static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
				   u64 timestamp)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
		"TOP ",
		"TIME ",
	};
	int err;
	bool show_progress = false;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		show_progress = true;
		__fallthrough;
	case OE_FLUSH__TOP:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__TIME:
		oe->next_flush = timestamp;
		show_progress = false;
		break;

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = do_flush(oe, show_progress);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

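/* Public wrappers: flush by type, or flush all events up to @timestamp. */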
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	return __ordered_events__flush(oe, how, 0);
}

int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
{
	return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
}

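/* Return the timestamp of the oldest queued event, or 0 when the queue is empty. */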
u64 ordered_events__first_time(struct ordered_events *oe)
{
	struct ordered_event *event;

	if (list_empty(&oe->events))
		return 0;

	event = list_first_entry(&oe->events, struct ordered_event, list);
	return event->timestamp;
}

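/*
 * Initialize a queue with the callback that will deliver events in time
 * order and an opaque data pointer for that callback's use. A minimal
 * usage sketch, with a hypothetical deliver() consumer:
 *
 *	static int deliver(struct ordered_events *oe, struct ordered_event *ev)
 *	{
 *		return consume(oe->data, ev->event); // consume() is hypothetical
 *	}
 *
 *	ordered_events__init(&oe, deliver, data);
 *	ordered_events__queue(&oe, event, sample_time, offset);
 *	ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *	ordered_events__free(&oe);
 */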
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
			  void *data)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver = deliver;
	oe->data = data;
}

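/*
 * Free one allocation buffer and, when events were copied on queue, the
 * first @max duplicated events stored in it.
 */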
static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
			    unsigned int max, struct ordered_events *oe)
{
	if (oe->copy_on_queue) {
		unsigned int i;

		for (i = 0; i < max; i++)
			__free_dup_event(oe, buffer->event[i].event);
	}

	free(buffer);
}

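/*
 * Release every allocation buffer. The partially filled current buffer
 * is handled first so only its populated slots get freed.
 */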
void ordered_events__free(struct ordered_events *oe)
{
	struct ordered_events_buffer *buffer, *tmp;

	if (list_empty(&oe->to_free))
		return;

	/*
	 * The current buffer might not have all of its events allocated
	 * yet; free only the ones that were actually handed out ...
	 */
	if (oe->buffer) {
		list_del(&oe->buffer->list);
		ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
	}

	/* ... and continue with the rest */
	list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
		list_del(&buffer->list);
		ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
	}
}

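/*
 * Reset the queue to its initial state while preserving the deliver
 * callback and the user data pointer.
 */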
void ordered_events__reinit(struct ordered_events *oe)
{
	ordered_events__deliver_t old_deliver = oe->deliver;
	/* Save the user data too: the memset() below would otherwise zero it. */
	void *old_data = oe->data;

	ordered_events__free(oe);
	memset(oe, '\0', sizeof(*oe));
	ordered_events__init(oe, old_deliver, old_data);
}
Wang Nan4532f642016-04-13 08:21:04 +0000415}