/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <linux/list.h>

#include "../perf.h"
#include "util.h"
#include "evlist.h"
#include "cpumap.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include "event.h"
#include "session.h"
#include "debug.h"
#include "parse-options.h"

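/*
 * Map the AUX area for one perf event mmap.  'userpg' is the event's
 * perf_event_mmap_page: aux_offset and aux_size are written there to tell
 * the kernel where the AUX area should live before mmap()ing that range of
 * the event fd.
 */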
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
                        struct auxtrace_mmap_params *mp,
                        void *userpg, int fd)
{
        struct perf_event_mmap_page *pc = userpg;

#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
        pr_err("Cannot use AUX area tracing mmaps\n");
        return -1;
#endif

        WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

        mm->userpg = userpg;
        mm->mask = mp->mask;
        mm->len = mp->len;
        mm->prev = 0;
        mm->idx = mp->idx;
        mm->tid = mp->tid;
        mm->cpu = mp->cpu;

        if (!mp->len) {
                mm->base = NULL;
                return 0;
        }

        pc->aux_offset = mp->offset;
        pc->aux_size = mp->len;

        mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
        if (mm->base == MAP_FAILED) {
                pr_debug2("failed to mmap AUX area\n");
                mm->base = NULL;
                return -1;
        }

        return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
        if (mm->base) {
                munmap(mm->base, mm->len);
                mm->base = NULL;
        }
}

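/*
 * Work out the mmap parameters for the AUX area: length, protection and,
 * when the length is a power of two, a mask for cheap wrap-around
 * arithmetic.  Mapping the area read-only selects overwrite mode.
 */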
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
                                off_t auxtrace_offset,
                                unsigned int auxtrace_pages,
                                bool auxtrace_overwrite)
{
        if (auxtrace_pages) {
                mp->offset = auxtrace_offset;
                mp->len = auxtrace_pages * (size_t)page_size;
                mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
                mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
                pr_debug2("AUX area mmap length %zu\n", mp->len);
        } else {
                mp->len = 0;
        }
}

void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
                                   struct perf_evlist *evlist, int idx,
                                   bool per_cpu)
{
        mp->idx = idx;

        if (per_cpu) {
                mp->cpu = evlist->cpus->map[idx];
                if (evlist->threads)
                        mp->tid = evlist->threads->map[0];
                else
                        mp->tid = -1;
        } else {
                mp->cpu = -1;
                mp->tid = evlist->threads->map[idx];
        }
}

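/*
 * AUX area data is kept in queues of auxtrace_buffers, normally one queue
 * per CPU, or one per thread when not recording per-cpu.  The queue array
 * starts small and grows as buffers with higher queue indexes arrive.
 */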
#define AUXTRACE_INIT_NR_QUEUES 32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
        struct auxtrace_queue *queue_array;
        unsigned int max_nr_queues, i;

        max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
        if (nr_queues > max_nr_queues)
                return NULL;

        queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
        if (!queue_array)
                return NULL;

        for (i = 0; i < nr_queues; i++) {
                INIT_LIST_HEAD(&queue_array[i].head);
                queue_array[i].priv = NULL;
        }

        return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
        queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
        queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
        if (!queues->queue_array)
                return -ENOMEM;
        return 0;
}

static int auxtrace_queues__grow(struct auxtrace_queues *queues,
                                 unsigned int new_nr_queues)
{
        unsigned int nr_queues = queues->nr_queues;
        struct auxtrace_queue *queue_array;
        unsigned int i;

        if (!nr_queues)
                nr_queues = AUXTRACE_INIT_NR_QUEUES;

        while (nr_queues && nr_queues < new_nr_queues)
                nr_queues <<= 1;

        if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
                return -EINVAL;

        queue_array = auxtrace_alloc_queue_array(nr_queues);
        if (!queue_array)
                return -ENOMEM;

        for (i = 0; i < queues->nr_queues; i++) {
                list_splice_tail(&queues->queue_array[i].head,
                                 &queue_array[i].head);
                queue_array[i].priv = queues->queue_array[i].priv;
        }

        queues->nr_queues = nr_queues;
        queues->queue_array = queue_array;

        return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
        int fd = perf_data_file__fd(session->file);
        void *p;
        ssize_t ret;

        if (size > SSIZE_MAX)
                return NULL;

        p = malloc(size);
        if (!p)
                return NULL;

        ret = readn(fd, p, size);
        if (ret != (ssize_t)size) {
                free(p);
                return NULL;
        }

        return p;
}

static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
                                       unsigned int idx,
                                       struct auxtrace_buffer *buffer)
{
        struct auxtrace_queue *queue;
        int err;

        if (idx >= queues->nr_queues) {
                err = auxtrace_queues__grow(queues, idx + 1);
                if (err)
                        return err;
        }

        queue = &queues->queue_array[idx];

        if (!queue->set) {
                queue->set = true;
                queue->tid = buffer->tid;
                queue->cpu = buffer->cpu;
        } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
                pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
                       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
                return -EINVAL;
        }

        buffer->buffer_nr = queues->next_buffer_nr++;

        list_add_tail(&buffer->list, &queue->head);

        queues->new_data = true;
        queues->populated = true;

        return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

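/*
 * On 32-bit, a buffer that is too big to mmap in one piece is split into
 * BUFFER_LIMIT_FOR_32_BIT sized chunks.  Every chunk after the first is
 * marked 'consecutive' to show it directly continues the previous one.
 */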
static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
                                         unsigned int idx,
                                         struct auxtrace_buffer *buffer)
{
        u64 sz = buffer->size;
        bool consecutive = false;
        struct auxtrace_buffer *b;
        int err;

        while (sz > BUFFER_LIMIT_FOR_32_BIT) {
                b = memdup(buffer, sizeof(struct auxtrace_buffer));
                if (!b)
                        return -ENOMEM;
                b->size = BUFFER_LIMIT_FOR_32_BIT;
                b->consecutive = consecutive;
                err = auxtrace_queues__add_buffer(queues, idx, b);
                if (err) {
                        auxtrace_buffer__free(b);
                        return err;
                }
                buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
                sz -= BUFFER_LIMIT_FOR_32_BIT;
                consecutive = true;
        }

        buffer->size = sz;
        buffer->consecutive = consecutive;

        return 0;
}

static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
                                             struct perf_session *session,
                                             unsigned int idx,
                                             struct auxtrace_buffer *buffer)
{
        if (session->one_mmap) {
                buffer->data = buffer->data_offset - session->one_mmap_offset +
                               session->one_mmap_addr;
        } else if (perf_data_file__is_pipe(session->file)) {
                buffer->data = auxtrace_copy_data(buffer->size, session);
                if (!buffer->data)
                        return -ENOMEM;
                buffer->data_needs_freeing = true;
        } else if (BITS_PER_LONG == 32 &&
                   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
                int err;

                err = auxtrace_queues__split_buffer(queues, idx, buffer);
                if (err)
                        return err;
        }

        return auxtrace_queues__add_buffer(queues, idx, buffer);
}

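/*
 * Queue the data described by a PERF_RECORD_AUXTRACE event.  'data_offset'
 * is the file position of the trace data that follows the event; how that
 * data is accessed later depends on whether the session is a single mmap,
 * a pipe, or a regular file.
 */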
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
                               struct perf_session *session,
                               union perf_event *event, off_t data_offset,
                               struct auxtrace_buffer **buffer_ptr)
{
        struct auxtrace_buffer *buffer;
        unsigned int idx;
        int err;

        buffer = zalloc(sizeof(struct auxtrace_buffer));
        if (!buffer)
                return -ENOMEM;

        buffer->pid = -1;
        buffer->tid = event->auxtrace.tid;
        buffer->cpu = event->auxtrace.cpu;
        buffer->data_offset = data_offset;
        buffer->offset = event->auxtrace.offset;
        buffer->reference = event->auxtrace.reference;
        buffer->size = event->auxtrace.size;
        idx = event->auxtrace.idx;

        err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
        if (err)
                goto out_err;

        if (buffer_ptr)
                *buffer_ptr = buffer;

        return 0;

out_err:
        auxtrace_buffer__free(buffer);
        return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
        unsigned int i;

        for (i = 0; i < queues->nr_queues; i++) {
                while (!list_empty(&queues->queue_array[i].head)) {
                        struct auxtrace_buffer *buffer;

                        buffer = list_entry(queues->queue_array[i].head.next,
                                            struct auxtrace_buffer, list);
                        list_del(&buffer->list);
                        auxtrace_buffer__free(buffer);
                }
        }

        zfree(&queues->queue_array);
        queues->nr_queues = 0;
}

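/*
 * The auxtrace heap is a min-heap of (queue_nr, ordinal) entries keyed on
 * the ordinal, typically a timestamp, and is used to pick which queue to
 * process next.  auxtrace_heapify() places an entry at position 'pos' and
 * sifts it up until the heap property holds.
 */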
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
                             unsigned int pos, unsigned int queue_nr,
                             u64 ordinal)
{
        unsigned int parent;

        while (pos) {
                parent = (pos - 1) >> 1;
                if (heap_array[parent].ordinal <= ordinal)
                        break;
                heap_array[pos] = heap_array[parent];
                pos = parent;
        }
        heap_array[pos].queue_nr = queue_nr;
        heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
                       u64 ordinal)
{
        struct auxtrace_heap_item *heap_array;

        if (queue_nr >= heap->heap_sz) {
                unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

                while (heap_sz <= queue_nr)
                        heap_sz <<= 1;
                heap_array = realloc(heap->heap_array,
                                     heap_sz * sizeof(struct auxtrace_heap_item));
                if (!heap_array)
                        return -ENOMEM;
                heap->heap_array = heap_array;
                heap->heap_sz = heap_sz;
        }

        auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

        return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
        zfree(&heap->heap_array);
        heap->heap_cnt = 0;
        heap->heap_sz = 0;
}

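/*
 * Remove the smallest entry (the root): walk the hole left at the root down
 * towards the leaves by promoting the smaller child, then re-insert the last
 * array element at the final position with auxtrace_heapify().
 */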
void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
        unsigned int pos, last, heap_cnt = heap->heap_cnt;
        struct auxtrace_heap_item *heap_array;

        if (!heap_cnt)
                return;

        heap->heap_cnt -= 1;

        heap_array = heap->heap_array;

        pos = 0;
        while (1) {
                unsigned int left, right;

                left = (pos << 1) + 1;
                if (left >= heap_cnt)
                        break;
                right = left + 1;
                if (right >= heap_cnt) {
                        heap_array[pos] = heap_array[left];
                        return;
                }
                if (heap_array[left].ordinal < heap_array[right].ordinal) {
                        heap_array[pos] = heap_array[left];
                        pos = left;
                } else {
                        heap_array[pos] = heap_array[right];
                        pos = right;
                }
        }

        last = heap_cnt - 1;
        auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
                         heap_array[last].ordinal);
}

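/*
 * The auxtrace_record callbacks below are provided by the architecture
 * specific AUX area tracing support (Intel PT, for example).  When there is
 * no such support, 'itr' is NULL and these wrappers fall back to harmless
 * defaults.
 */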
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr)
{
        if (itr)
                return itr->info_priv_size(itr);
        return 0;
}

static int auxtrace_not_supported(void)
{
        pr_err("AUX area tracing is not supported on this architecture\n");
        return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
                               struct perf_session *session,
                               struct auxtrace_info_event *auxtrace_info,
                               size_t priv_size)
{
        if (itr)
                return itr->info_fill(itr, session, auxtrace_info, priv_size);
        return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
        if (itr)
                itr->free(itr);
}

int auxtrace_record__options(struct auxtrace_record *itr,
                             struct perf_evlist *evlist,
                             struct record_opts *opts)
{
        if (itr)
                return itr->recording_options(itr, evlist, opts);
        return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
        if (itr)
                return itr->reference(itr);
        return 0;
}

struct auxtrace_record *__weak
auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
{
        *err = 0;
        return NULL;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
                                              struct auxtrace_buffer *buffer)
{
        if (buffer) {
                if (list_is_last(&buffer->list, &queue->head))
                        return NULL;
                return list_entry(buffer->list.next, struct auxtrace_buffer,
                                  list);
        } else {
                if (list_empty(&queue->head))
                        return NULL;
                return list_entry(queue->head.next, struct auxtrace_buffer,
                                  list);
        }
}

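/*
 * Map a buffer's trace data from the perf.data file on demand.  mmap()
 * needs a page-aligned file offset, so the mapping is extended downwards to
 * a page boundary and 'data' points 'adj' bytes into it.
 */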
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
        size_t adj = buffer->data_offset & (page_size - 1);
        size_t size = buffer->size + adj;
        off_t file_offset = buffer->data_offset - adj;
        void *addr;

        if (buffer->data)
                return buffer->data;

        addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
        if (addr == MAP_FAILED)
                return NULL;

        buffer->mmap_addr = addr;
        buffer->mmap_size = size;

        buffer->data = addr + adj;

        return buffer->data;
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
        if (!buffer->data || !buffer->mmap_addr)
                return;
        munmap(buffer->mmap_addr, buffer->mmap_size);
        buffer->mmap_addr = NULL;
        buffer->mmap_size = 0;
        buffer->data = NULL;
        buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
        auxtrace_buffer__put_data(buffer);
        if (buffer->data_needs_freeing) {
                buffer->data_needs_freeing = false;
                zfree(&buffer->data);
                buffer->use_data = NULL;
                buffer->size = 0;
        }
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
        auxtrace_buffer__drop_data(buffer);
        free(buffer);
}

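/*
 * Fill in a PERF_RECORD_AUXTRACE_ERROR event.  The event size is trimmed to
 * the length of the message actually copied, rounded up to a u64 boundary.
 */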
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
                          int code, int cpu, pid_t pid, pid_t tid, u64 ip,
                          const char *msg)
{
        size_t size;

        memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event));

        auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
        auxtrace_error->type = type;
        auxtrace_error->code = code;
        auxtrace_error->cpu = cpu;
        auxtrace_error->pid = pid;
        auxtrace_error->tid = tid;
        auxtrace_error->ip = ip;
        strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

        size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
               strlen(auxtrace_error->msg) + 1;
        auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
                                         struct perf_tool *tool,
                                         struct perf_session *session,
                                         perf_event__handler_t process)
{
        union perf_event *ev;
        size_t priv_size;
        int err;

        pr_debug2("Synthesizing auxtrace information\n");
        priv_size = auxtrace_record__info_priv_size(itr);
        ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
        if (!ev)
                return -ENOMEM;

        ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
        ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) +
                                        priv_size;
        err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
                                         priv_size);
        if (err)
                goto out_free;

        err = process(tool, ev, NULL, NULL);
out_free:
        free(ev);
        return err;
}

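/*
 * Defaults for the --itrace option: synthesize instruction samples every
 * 100000 ns plus branch and error events, and use a 16-entry callchain when
 * callchains are requested.
 */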
#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
{
        synth_opts->instructions = true;
        synth_opts->branches = true;
        synth_opts->errors = true;
        synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
        synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
        synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
}

/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here, which is added after this cset,
 * when support in 'perf script' for these options is introduced.
 */
int itrace_parse_synth_opts(const struct option *opt, const char *str,
                            int unset)
{
        struct itrace_synth_opts *synth_opts = opt->value;
        const char *p;
        char *endptr;

        synth_opts->set = true;

        if (unset) {
                synth_opts->dont_decode = true;
                return 0;
        }

        if (!str) {
                itrace_synth_opts__set_default(synth_opts);
                return 0;
        }

        for (p = str; *p;) {
                switch (*p++) {
                case 'i':
                        synth_opts->instructions = true;
                        while (*p == ' ' || *p == ',')
                                p += 1;
                        if (isdigit(*p)) {
                                synth_opts->period = strtoull(p, &endptr, 10);
                                p = endptr;
                                while (*p == ' ' || *p == ',')
                                        p += 1;
                                switch (*p++) {
                                case 'i':
                                        synth_opts->period_type =
                                                PERF_ITRACE_PERIOD_INSTRUCTIONS;
                                        break;
                                case 't':
                                        synth_opts->period_type =
                                                PERF_ITRACE_PERIOD_TICKS;
                                        break;
                                case 'm':
                                        synth_opts->period *= 1000;
                                        /* Fall through */
                                case 'u':
                                        synth_opts->period *= 1000;
                                        /* Fall through */
                                case 'n':
                                        if (*p++ != 's')
                                                goto out_err;
                                        synth_opts->period_type =
                                                PERF_ITRACE_PERIOD_NANOSECS;
                                        break;
                                case '\0':
                                        goto out;
                                default:
                                        goto out_err;
                                }
                        }
                        break;
                case 'b':
                        synth_opts->branches = true;
                        break;
                case 'e':
                        synth_opts->errors = true;
                        break;
                case 'd':
                        synth_opts->log = true;
                        break;
                case 'c':
                        synth_opts->branches = true;
                        synth_opts->calls = true;
                        break;
                case 'r':
                        synth_opts->branches = true;
                        synth_opts->returns = true;
                        break;
                case 'g':
                        synth_opts->instructions = true;
                        synth_opts->callchain = true;
                        synth_opts->callchain_sz =
                                        PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
                        while (*p == ' ' || *p == ',')
                                p += 1;
                        if (isdigit(*p)) {
                                unsigned int val;

                                val = strtoul(p, &endptr, 10);
                                p = endptr;
                                if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
                                        goto out_err;
                                synth_opts->callchain_sz = val;
                        }
                        break;
                case ' ':
                case ',':
                        break;
                default:
                        goto out_err;
                }
        }
out:
        if (synth_opts->instructions) {
                if (!synth_opts->period_type)
                        synth_opts->period_type =
                                        PERF_ITRACE_DEFAULT_PERIOD_TYPE;
                if (!synth_opts->period)
                        synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
        }

        return 0;

out_err:
        pr_err("Bad Instruction Tracing options '%s'\n", str);
        return -EINVAL;
}

static const char * const auxtrace_error_type_name[] = {
        [PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
        const char *error_type_name = NULL;

        if (type < PERF_AUXTRACE_ERROR_MAX)
                error_type_name = auxtrace_error_type_name[type];
        if (!error_type_name)
                error_type_name = "unknown AUX";
        return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
        struct auxtrace_error_event *e = &event->auxtrace_error;
        int ret;

        ret = fprintf(fp, " %s error type %u",
                      auxtrace_error_name(e->type), e->type);
        ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
                       e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
        return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
                                      union perf_event *event)
{
        struct auxtrace_error_event *e = &event->auxtrace_error;

        if (e->type < PERF_AUXTRACE_ERROR_MAX)
                session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
        int i;

        for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
                if (!stats->nr_auxtrace_errors[i])
                        continue;
                ui__warning("%u %s errors\n",
                            stats->nr_auxtrace_errors[i],
                            auxtrace_error_name(i));
        }
}

int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event,
                                       struct perf_session *session __maybe_unused)
{
        perf_event__fprintf_auxtrace_error(event, stdout);
        return 0;
}

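/*
 * Read any new data from the AUX area ring buffer, handling wrap-around, and
 * pass it to 'fn' (e.g. record__process_auxtrace()) along with a synthesized
 * PERF_RECORD_AUXTRACE event describing it.  Returns 1 if data was processed,
 * 0 if there was nothing new, negative on error.
 */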
int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
                        struct perf_tool *tool, process_auxtrace_t fn)
{
        u64 head = auxtrace_mmap__read_head(mm);
        u64 old = mm->prev, offset, ref;
        unsigned char *data = mm->base;
        size_t size, head_off, old_off, len1, len2, padding;
        union perf_event ev;
        void *data1, *data2;

        if (old == head)
                return 0;

        pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
                  mm->idx, old, head, head - old);

        if (mm->mask) {
                head_off = head & mm->mask;
                old_off = old & mm->mask;
        } else {
                head_off = head % mm->len;
                old_off = old % mm->len;
        }

        if (head_off > old_off)
                size = head_off - old_off;
        else
                size = mm->len - (old_off - head_off);

        ref = auxtrace_record__reference(itr);

        if (head > old || size <= head || mm->mask) {
                offset = head - size;
        } else {
                /*
                 * When the buffer size is not a power of 2, 'head' wraps at the
                 * highest multiple of the buffer size, so we have to subtract
                 * the remainder here.
                 */
                u64 rem = (0ULL - mm->len) % mm->len;

                offset = head - size - rem;
        }

        if (size > head_off) {
                len1 = size - head_off;
                data1 = &data[mm->len - len1];
                len2 = head_off;
                data2 = &data[0];
        } else {
                len1 = size;
                data1 = &data[head_off - len1];
                len2 = 0;
                data2 = NULL;
        }

        /* padding must be written by fn() e.g. record__process_auxtrace() */
        padding = size & 7;
        if (padding)
                padding = 8 - padding;

        memset(&ev, 0, sizeof(ev));
        ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
        ev.auxtrace.header.size = sizeof(ev.auxtrace);
        ev.auxtrace.size = size + padding;
        ev.auxtrace.offset = offset;
        ev.auxtrace.reference = ref;
        ev.auxtrace.idx = mm->idx;
        ev.auxtrace.tid = mm->tid;
        ev.auxtrace.cpu = mm->cpu;

        if (fn(tool, &ev, data1, len1, data2, len2))
                return -1;

        mm->prev = head;

        auxtrace_mmap__write_tail(mm, head);
        if (itr->read_finish) {
                int err;

                err = itr->read_finish(itr, mm->idx);
                if (err < 0)
                        return err;
        }

        return 1;
}