blob: 9a4f9cdb752fea088e3868f1fee0c447c968ac22 [file] [log] [blame]
Adrian Hunter90e457f2015-07-17 19:33:41 +03001/*
2 * intel_pt.c: Intel Processor Trace support
3 * Copyright (c) 2013-2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
Arnaldo Carvalho de Melofd20e812017-04-17 15:23:08 -030016#include <inttypes.h>
Adrian Hunter90e457f2015-07-17 19:33:41 +030017#include <stdio.h>
18#include <stdbool.h>
19#include <errno.h>
20#include <linux/kernel.h>
21#include <linux/types.h>
22
23#include "../perf.h"
24#include "session.h"
25#include "machine.h"
Arnaldo Carvalho de Melo98521b32017-04-25 15:45:35 -030026#include "memswap.h"
Adrian Hunterf14445e2015-09-25 16:15:45 +030027#include "sort.h"
Adrian Hunter90e457f2015-07-17 19:33:41 +030028#include "tool.h"
29#include "event.h"
30#include "evlist.h"
31#include "evsel.h"
32#include "map.h"
33#include "color.h"
34#include "util.h"
35#include "thread.h"
36#include "thread-stack.h"
37#include "symbol.h"
38#include "callchain.h"
39#include "dso.h"
40#include "debug.h"
41#include "auxtrace.h"
42#include "tsc.h"
43#include "intel-pt.h"
Taeung Song41840d22016-06-23 17:55:17 +090044#include "config.h"
Adrian Hunter90e457f2015-07-17 19:33:41 +030045
46#include "intel-pt-decoder/intel-pt-log.h"
47#include "intel-pt-decoder/intel-pt-decoder.h"
48#include "intel-pt-decoder/intel-pt-insn-decoder.h"
49#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
50
51#define MAX_TIMESTAMP (~0ULL)
52
/*
 * Per-session Intel PT decoding state: one instance is created when the
 * auxtrace data is processed and shared by all decode queues.
 */
struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;		/* orders queues by timestamp */
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct perf_evsel *switch_evsel;
	struct thread *unknown_thread;		/* fallback when tid is unknown */
	bool timeless_decoding;			/* no usable timestamps in trace */
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;			/* correlate with sched_switch */
	bool mispred_all;			/* mark all branches mispredicted */
	int have_sched_switch;
	u32 pmu_type;				/* intel_pt PMU type to match evsels */
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;

	struct perf_tsc_conversion tc;		/* TSC <-> perf time conversion */
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	/* Synthesized event configuration: one sample_type/id pair per kind */
	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;

	/* Bits/values decoded from the intel_pt PMU config terms */
	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;

	unsigned long num_events;		/* counts events for initial_skip */

	char *filter;
	struct addr_filters filts;		/* address filters from config */
};
122
/*
 * Per-queue state machine used to keep decoding in step with context-switch
 * information (see intel_pt_queue->switch_state).
 */
enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
130
/*
 * Per-queue (per-cpu or per-thread) decode state.  Allocated lazily by
 * intel_pt_alloc_queue() and stored in auxtrace_queue->priv.
 */
struct intel_pt_queue {
	struct intel_pt *pt;			/* back pointer to session state */
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;		/* buffer currently being decoded */
	struct auxtrace_buffer *old_buffer;	/* previous buffer, kept for overlap fixing */
	void *decoder;				/* opaque intel_pt_decoder handle */
	const struct intel_pt_state *state;	/* last state returned by the decoder */
	struct ip_callchain *chain;		/* scratch for synthesized callchains */
	struct branch_stack *last_branch;	/* linearized copy for samples */
	struct branch_stack *last_branch_rb;	/* ring buffer of recent branches */
	size_t last_branch_pos;			/* ring-buffer write position */
	union perf_event *event_buf;		/* scratch for synthesized events */
	bool on_heap;
	bool stop;				/* stop feeding data to the decoder */
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;			/* copied from pt->sync_switch */
	pid_t pid, tid;
	int cpu;
	int switch_state;			/* enum switch_state */
	pid_t next_tid;
	struct thread *thread;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u32 flags;				/* PERF_IP_FLAG_* for the next sample */
	u16 insn_len;
	u64 last_insn_cnt;
	char insn[INTEL_PT_INSN_BUF_SZ];	/* raw bytes of current instruction */
};
162
163static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
164 unsigned char *buf, size_t len)
165{
166 struct intel_pt_pkt packet;
167 size_t pos = 0;
168 int ret, pkt_len, i;
169 char desc[INTEL_PT_PKT_DESC_MAX];
170 const char *color = PERF_COLOR_BLUE;
171
172 color_fprintf(stdout, color,
173 ". ... Intel Processor Trace data: size %zu bytes\n",
174 len);
175
176 while (len) {
177 ret = intel_pt_get_packet(buf, len, &packet);
178 if (ret > 0)
179 pkt_len = ret;
180 else
181 pkt_len = 1;
182 printf(".");
183 color_fprintf(stdout, color, " %08x: ", pos);
184 for (i = 0; i < pkt_len; i++)
185 color_fprintf(stdout, color, " %02x", buf[i]);
186 for (; i < 16; i++)
187 color_fprintf(stdout, color, " ");
188 if (ret > 0) {
189 ret = intel_pt_pkt_desc(&packet, desc,
190 INTEL_PT_PKT_DESC_MAX);
191 if (ret > 0)
192 color_fprintf(stdout, color, " %s\n", desc);
193 } else {
194 color_fprintf(stdout, color, " Bad packet!\n");
195 }
196 pos += pkt_len;
197 buf += pkt_len;
198 len -= pkt_len;
199 }
200}
201
/* Dump helper used from event printing: terminate the line, then dump. */
static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}
208
/*
 * Snapshot buffers can repeat data already seen in the previous buffer.
 * Trim buffer 'b' so decoding starts at the first byte not already present
 * at the end of buffer 'a'.  Returns -EINVAL if no overlap point is found.
 */
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive);
	if (!start)
		return -EINVAL;
	/* Decode only the non-overlapping tail of 'b' */
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}
225
/* This function assumes data is processed sequentially only */
/*
 * Decoder callback: hand the next chunk of trace data to the decoder.
 * Fills in 'b' (data pointer, length, reference timestamp, continuity) and
 * manages the current/previous auxtrace_buffer lifecycle for the queue.
 * b->len == 0 signals end of data.
 */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;

	/* step_through_buffers set this after the previous buffer */
	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		/* No more data: release the retained previous buffer */
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	/* Data may still be on disk; map it in on demand */
	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	if (ptq->pt->snapshot_mode && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	/* use_data/use_size are set when overlap trimming applied */
	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	/*
	 * A gap in the trace (first buffer, sampling mode, or non-consecutive
	 * snapshot) forces the decoder to re-synchronize (new trace_nr).
	 */
	if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
						      !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		/* Keep this buffer until the next one is fetched */
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		/* Empty buffer: drop it and try the next one */
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}
294
/*
 * Cached result of walking instructions from one ip to the next branch,
 * keyed by dso file offset (see intel_pt_cache_add/lookup).
 */
struct intel_pt_cache_entry {
	struct auxtrace_cache_entry entry;	/* must be first: cache linkage */
	u64 insn_cnt;				/* instructions walked */
	u64 byte_cnt;				/* bytes covered by the walk */
	enum intel_pt_insn_op op;		/* op of the terminating branch */
	enum intel_pt_insn_branch branch;
	int length;				/* length of last instruction */
	int32_t rel;				/* branch displacement */
	char insn[INTEL_PT_INSN_BUF_SZ];	/* raw bytes of last instruction */
};
305
/*
 * perf_config() callback: parse the "intel-pt.cache-divisor" setting into
 * *data (an int).  Out-of-range, unparseable, or missing values are ignored.
 * Always returns 0 so config iteration continues.
 */
static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		/* A config key can appear with no value: don't strtol(NULL) */
		if (!value)
			return 0;
		errno = 0;
		val = strtol(value, NULL, 0);
		/* errno catches ERANGE overflow that the bounds check misses */
		if (!errno && val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}
319
/*
 * Return the cache-size divisor, reading it once from perf config
 * ("intel-pt.cache-divisor") and defaulting to 64.  Cached in a static,
 * so only the first call does the config lookup.
 */
static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}
334
335static unsigned int intel_pt_cache_size(struct dso *dso,
336 struct machine *machine)
337{
338 off_t size;
339
340 size = dso__data_size(dso, machine);
341 size /= intel_pt_cache_divisor();
342 if (size < 1000)
343 return 10;
344 if (size > (1 << 21))
345 return 21;
346 return 32 - __builtin_clz(size);
347}
348
/*
 * Get (creating on first use) the per-dso instruction cache.  May return
 * NULL on allocation failure; callers treat a missing cache as non-fatal.
 */
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}
367
/*
 * Record the result of an instruction walk in the dso's cache, keyed by
 * file offset.  Returns -ENOMEM on allocation failure or the error from
 * auxtrace_cache__add().
 */
static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}
397
/* Look up a cached instruction walk by dso file offset; NULL if absent. */
static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}
408
/*
 * Decoder callback: starting at *ip, decode instructions from the dso image
 * until a branch instruction, 'to_ip', or 'max_insn_cnt' is reached.
 * On return, *ip is advanced, *insn_cnt_ptr holds the number of instructions
 * walked, and *intel_pt_insn describes the last instruction decoded.
 * Results for whole-map walks are cached per dso offset.
 */
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;	/* walk stayed within a single map */

	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	if (*ip >= ptq->pt->kernel_start)
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread) {
		/* Only kernel addresses can be resolved without a thread */
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = ptq->pt->unknown_thread;
	}

	while (1) {
		thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al);
		if (!al.map || !al.map->dso)
			return -EINVAL;

		/* Don't retry a dso whose data already failed to read */
		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		/* Cached walks apply only to the unbounded single-map case */
		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn,
				       INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip)
				goto out_no_cache;

			/* Crossed the end of this map: resolve *ip again */
			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	/* Multi-map walks are not cacheable (offset key is per dso) */
	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}
542
/*
 * Decide whether a TIP.PGD destination should be suppressed, based on the
 * configured address filters.  'offset' is the file offset of 'ip' within
 * 'filename' (NULL filename means kernel).  Returns true if the ip hit a
 * trace-stop region, or if filters exist and none matched.
 */
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter = false;
	bool hit_tracestop = false;
	bool hit_filter = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		/* Filter must be for the same file (or both kernel) */
		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}
580
/*
 * Resolve 'ip' to a dso/offset and apply the address filters.
 * Returns > 0 when the ip should be filtered out, 0 when not,
 * negative on resolution failure.
 */
static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	/* Kernel addresses are matched by the address itself */
	if (ip >= ptq->pt->kernel_start)
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al);
	if (!al.map || !al.map->dso)
		return -EINVAL;

	offset = al.map->map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
				     al.map->dso->long_name);
}
607
/*
 * Decoder callback wrapper: treat resolution errors (< 0) as "not filtered".
 */
static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	int err = __intel_pt_pgd_ip(ip, data);

	return err > 0;
}
612
Adrian Hunter90e457f2015-07-17 19:33:41 +0300613static bool intel_pt_get_config(struct intel_pt *pt,
614 struct perf_event_attr *attr, u64 *config)
615{
616 if (attr->type == pt->pmu_type) {
617 if (config)
618 *config = attr->config;
619 return true;
620 }
621
622 return false;
623}
624
/* True only if every Intel PT evsel in the session excludes the kernel. */
static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return false;
	}
	return true;
}
636
/*
 * Return compression is on unless some Intel PT evsel set the 'noretcomp'
 * config bit.  If the bit is unknown (not advertised), assume enabled.
 */
static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}
652
/*
 * Whether branch tracing was enabled when recording.  Checks raw config
 * bits (bit 0 and bit 0x2000) — presumably the pass-through and BranchEn
 * bits of the Intel PT config; TODO confirm against the PMU format specs.
 */
static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & 1) && !(config & 0x2000))
			return false;
	}
	return true;
}
665
Adrian Hunter11fa7cb2015-07-17 19:33:54 +0300666static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
667{
668 struct perf_evsel *evsel;
669 unsigned int shift;
670 u64 config;
671
672 if (!pt->mtc_freq_bits)
673 return 0;
674
675 for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
676 config >>= 1;
677
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -0300678 evlist__for_each_entry(pt->session->evlist, evsel) {
Adrian Hunter11fa7cb2015-07-17 19:33:54 +0300679 if (intel_pt_get_config(pt, &evsel->attr, &config))
680 return (config & pt->mtc_freq_bits) >> shift;
681 }
682 return 0;
683}
684
/*
 * Decide whether decoding can use timestamps: requires the TSC config bit,
 * cap_user_time_zero, and PERF_SAMPLE_TIME on every evsel.  Returns true
 * ("timeless") when timestamps cannot be relied upon.
 */
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}
706
/* True if any Intel PT evsel traces the kernel. */
static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return true;
	}
	return false;
}
718
/*
 * True only if every Intel PT evsel has the TSC config bit set (and there
 * was at least one such evsel); any evsel without it returns false early.
 */
static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}
738
739static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
740{
741 u64 quot, rem;
742
743 quot = ns / pt->tc.time_mult;
744 rem = ns % pt->tc.time_mult;
745 return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
746 pt->tc.time_mult;
747}
748
/*
 * Allocate and initialize one decode queue, including its decoder instance
 * and the optional callchain / last-branch scratch buffers.
 * Returns NULL on allocation or decoder-creation failure.
 */
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		size_t sz = sizeof(struct ip_callchain);

		sz += pt->synth_opts.callchain_sz * sizeof(u64);
		ptq->chain = zalloc(sz);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += pt->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		/* Ring buffer plus a linearized copy of the same size */
		ptq->last_branch = zalloc(sz);
		if (!ptq->last_branch)
			goto out_free;
		ptq->last_branch_rb = zalloc(sz);
		if (!ptq->last_branch_rb)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	/* Wire up the decoder callbacks and hardware parameters */
	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

	/* Only hook the TIP.PGD filter callback if address filters exist */
	if (pt->filts.cnt > 0)
		params.pgd_ip = intel_pt_pgd_ip;

	if (pt->synth_opts.instructions) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
						INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				/* Nanoseconds are converted to TSC ticks */
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		/* Default: one sample per instruction */
		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	/* zfree() tolerates members that were never allocated */
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}
848
/* Free a decode queue and everything it owns; safe to call with NULL. */
static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
}
863
/*
 * Refresh the queue's pid/tid/cpu and cached thread reference from the
 * machine's current-tid state (per-cpu queues) or the queue's fixed tid.
 */
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	/* Per-cpu queue, or switch tracking: tid may have changed */
	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}
883
/*
 * Derive the PERF_IP_FLAG_* flags and instruction bytes for the next
 * synthesized sample from the decoder state.
 */
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		/* Async with a destination means an interrupt-like branch */
		if (ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		ptq->insn_len = 0;
	} else {
		/* No source ip means tracing just (re)started */
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}
}
909
/*
 * Lazily allocate a queue's decode state and, for timed decoding, decode
 * forward to the first timestamp and place the queue on the ordering heap.
 */
static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	/* Nothing to do for a queue with no trace data */
	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		if (pt->sampling_mode && !pt->snapshot_mode &&
		    pt->timeless_decoding)
			ptq->step_through_buffers = true;

		ptq->sync_switch = pt->sync_switch;
	}

	if (!ptq->on_heap &&
	    (!ptq->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		/* Timeless decoding does not use the timestamp heap */
		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
		/* Decode until a usable timestamp (skipping errors) */
		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		/* The decoded state becomes the first pending sample */
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}
976
977static int intel_pt_setup_queues(struct intel_pt *pt)
978{
979 unsigned int i;
980 int ret;
981
982 for (i = 0; i < pt->queues.nr_queues; i++) {
983 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
984 if (ret)
985 return ret;
986 }
987 return 0;
988}
989
/*
 * Linearize the last-branch ring buffer into ptq->last_branch, newest entry
 * first.  The ring is written backwards from last_branch_pos, so the copy is
 * done in (at most) two chunks: [pos, end) then, if the ring wrapped,
 * [0, pos).
 */
static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
	struct branch_stack *bs_src = ptq->last_branch_rb;
	struct branch_stack *bs_dst = ptq->last_branch;
	size_t nr = 0;

	bs_dst->nr = bs_src->nr;

	if (!bs_src->nr)
		return;

	/* First chunk: from the write position to the end of the ring */
	nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[ptq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	/* Second chunk only exists once the ring buffer is full */
	if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * ptq->last_branch_pos);
	}
}
1012
1013static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
1014{
1015 ptq->last_branch_pos = 0;
1016 ptq->last_branch_rb->nr = 0;
1017}
1018
/*
 * Push the current branch into the last-branch ring buffer.  Entries are
 * written backwards (position decrements, wrapping to the top), so the
 * newest entry is always at last_branch_pos.
 */
static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct branch_stack *bs = ptq->last_branch_rb;
	struct branch_entry *be;

	/* Wrap the write position back to the top of the ring */
	if (!ptq->last_branch_pos)
		ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

	ptq->last_branch_pos -= 1;

	be = &bs->entries[ptq->last_branch_pos];
	be->from = state->from_ip;
	be->to = state->to_ip;
	be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
	be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
	/* No support for mispredict */
	be->flags.mispred = ptq->pt->mispred_all;

	/* nr saturates at the configured ring size */
	if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
		bs->nr += 1;
}
1041
/*
 * True while still inside the user-requested initial skip count.
 * Note the short-circuit: num_events is incremented only when
 * initial_skip is non-zero.
 */
static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events++ < pt->synth_opts.initial_skip;
}
1047
1048static void intel_pt_prep_b_sample(struct intel_pt *pt,
1049 struct intel_pt_queue *ptq,
1050 union perf_event *event,
1051 struct perf_sample *sample)
1052{
1053 event->sample.header.type = PERF_RECORD_SAMPLE;
1054 event->sample.header.misc = PERF_RECORD_MISC_USER;
1055 event->sample.header.size = sizeof(struct perf_event_header);
1056
1057 if (!pt->timeless_decoding)
1058 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1059
1060 sample->cpumode = PERF_RECORD_MISC_USER;
1061 sample->ip = ptq->state->from_ip;
1062 sample->pid = ptq->pid;
1063 sample->tid = ptq->tid;
1064 sample->addr = ptq->state->to_ip;
1065 sample->period = 1;
1066 sample->cpu = ptq->cpu;
1067 sample->flags = ptq->flags;
1068 sample->insn_len = ptq->insn_len;
1069 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1070}
1071
Adrian Hunter90e457f2015-07-17 19:33:41 +03001072static int intel_pt_inject_event(union perf_event *event,
Adrian Huntera10eb532018-01-16 15:14:50 +02001073 struct perf_sample *sample, u64 type)
Adrian Hunter90e457f2015-07-17 19:33:41 +03001074{
1075 event->header.size = perf_event__sample_event_size(sample, type, 0);
Adrian Hunter936f1f32018-01-16 15:14:52 +02001076 return perf_event__synthesize_sample(event, type, 0, sample);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001077}
1078
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001079static inline int intel_pt_opt_inject(struct intel_pt *pt,
1080 union perf_event *event,
1081 struct perf_sample *sample, u64 type)
1082{
1083 if (!pt->synth_opts.inject)
1084 return 0;
1085
Adrian Huntera10eb532018-01-16 15:14:50 +02001086 return intel_pt_inject_event(event, sample, type);
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001087}
1088
/*
 * Deliver a synthesized event to the session, optionally injecting the
 * sample data first.  Returns 0 on success or a negative error, which is
 * also logged.
 */
static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
					  union perf_event *event,
					  struct perf_sample *sample, u64 type)
{
	int ret;

	ret = intel_pt_opt_inject(pt, event, sample, type);
	if (ret)
		return ret;

	ret = perf_session__deliver_synth_event(pt->session, event, sample);
	if (ret)
		pr_err("Intel PT: failed to deliver event, error %d\n", ret);

	return ret;
}
1105
/*
 * Synthesize a branch sample from the current decoder state, subject to
 * the branch filter and initial-skip options.
 */
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct dummy_branch_stack {
		u64 nr;
		struct branch_entry entries;
	} dummy_bs;

	/* Skip branch types the user did not ask for */
	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
		return 0;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_b_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->branches_id;
	sample.stream_id = ptq->pt->branches_id;

	/*
	 * perf report cannot handle events without a branch stack when using
	 * SORT_MODE__BRANCH so make a dummy one.
	 */
	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	return intel_pt_deliver_synth_b_event(pt, event, &sample,
					      pt->branches_sample_type);
}
1145
/*
 * Prepare a full synthesized sample: the common "b" sample fields plus,
 * when requested, a callchain and a last-branch stack snapshot.
 */
static void intel_pt_prep_sample(struct intel_pt *pt,
				 struct intel_pt_queue *ptq,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	intel_pt_prep_b_sample(pt, ptq, event, sample);

	if (pt->synth_opts.callchain) {
		/* Snapshot the thread stack into the queue's chain buffer */
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz, sample->ip);
		sample->callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		intel_pt_copy_last_branch_rb(ptq);
		sample->branch_stack = ptq->last_branch;
	}
}
1164
/*
 * Deliver a synthesized event and, if last-branch stacks are being
 * generated, reset the ring buffer so the next sample starts afresh.
 */
static inline int intel_pt_deliver_synth_event(struct intel_pt *pt,
					       struct intel_pt_queue *ptq,
					       union perf_event *event,
					       struct perf_sample *sample,
					       u64 type)
{
	int ret;

	ret = intel_pt_deliver_synth_b_event(pt, event, sample, type);

	/* Reset even on delivery failure - the entries were consumed */
	if (pt->synth_opts.last_branch)
		intel_pt_reset_last_branch_rb(ptq);

	return ret;
}
1180
/*
 * Synthesize an instruction sample.  The period is the number of
 * instructions retired since the previous instruction sample.
 */
static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->instructions_id;
	sample.stream_id = ptq->pt->instructions_id;
	sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;

	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->instructions_sample_type);
}
1201
/*
 * Synthesize a transaction (TSX) sample from the current decoder state.
 */
static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->transactions_id;
	sample.stream_id = ptq->pt->transactions_id;

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->transactions_sample_type);
}
1219
Adrian Hunter37973072017-06-30 11:36:45 +03001220static void intel_pt_prep_p_sample(struct intel_pt *pt,
1221 struct intel_pt_queue *ptq,
1222 union perf_event *event,
1223 struct perf_sample *sample)
1224{
1225 intel_pt_prep_sample(pt, ptq, event, sample);
1226
1227 /*
1228 * Zero IP is used to mean "trace start" but that is not the case for
1229 * power or PTWRITE events with no IP, so clear the flags.
1230 */
1231 if (!sample->ip)
1232 sample->flags = 0;
1233}
1234
/*
 * Synthesize a PTWRITE sample carrying the PTW packet payload as raw data.
 */
static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_ptwrite raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->ptwrites_id;
	sample.stream_id = ptq->pt->ptwrites_id;

	raw.flags = 0;
	/* Whether the PTW packet was accompanied by an IP (FUP) */
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
	raw.payload = cpu_to_le64(ptq->state->ptw_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->ptwrites_sample_type);
}
1260
/*
 * Synthesize a core-to-bus ratio (CBR) change sample, reporting the new
 * ratio and the corresponding frequency.
 */
static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_cbr raw;
	u32 flags;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->cbr_id;
	sample.stream_id = ptq->pt->cbr_id;

	/* Low 16 bits: CBR payload; high 16 bits: max non-turbo ratio */
	flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
	raw.flags = cpu_to_le32(flags);
	/*
	 * NOTE(review): raw.cbr is read here without an explicit assignment -
	 * it presumably overlays the low bits of raw.flags via a union in
	 * struct perf_synth_intel_cbr; confirm against the struct definition.
	 */
	raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
	raw.reserved3 = 0;

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}
1288
/*
 * Synthesize an MWAIT power event sample carrying the MWAIT packet payload.
 */
static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_mwait raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->mwait_id;
	sample.stream_id = ptq->pt->mwait_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->mwait_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}
1313
/*
 * Synthesize a PWRE (power entry) event sample carrying the PWRE payload.
 */
static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwre raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwre_id;
	sample.stream_id = ptq->pt->pwre_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwre_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}
1338
/*
 * Synthesize an EXSTOP (execution stopped) event sample.
 */
static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_exstop raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->exstop_id;
	sample.stream_id = ptq->pt->exstop_id;

	raw.flags = 0;
	/* Whether the EXSTOP packet was accompanied by an IP (FUP) */
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}
1363
/*
 * Synthesize a PWRX (power exit) event sample carrying the PWRX payload.
 */
static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwrx raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwrx_id;
	sample.stream_id = ptq->pt->pwrx_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwrx_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}
1388
Adrian Hunter90e457f2015-07-17 19:33:41 +03001389static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
1390 pid_t pid, pid_t tid, u64 ip)
1391{
1392 union perf_event event;
1393 char msg[MAX_AUXTRACE_ERROR_MSG];
1394 int err;
1395
1396 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
1397
1398 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
1399 code, cpu, pid, tid, ip, msg);
1400
1401 err = perf_session__deliver_synth_event(pt->session, &event, NULL);
1402 if (err)
1403 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
1404 err);
1405
1406 return err;
1407}
1408
/*
 * Complete a pending context switch on this queue's cpu: make next_tid the
 * current tid for the cpu and refresh the queue's pid/tid/cpu state.
 */
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	/* Nothing pending */
	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}
1429
1430static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
1431{
1432 struct intel_pt *pt = ptq->pt;
1433
1434 return ip == pt->switch_ip &&
1435 (ptq->flags & PERF_IP_FLAG_BRANCH) &&
1436 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
1437 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
1438}
1439
Adrian Hunter37973072017-06-30 11:36:45 +03001440#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
1441 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT | \
1442 INTEL_PT_CBR_CHG)
1443
Adrian Hunter90e457f2015-07-17 19:33:41 +03001444static int intel_pt_sample(struct intel_pt_queue *ptq)
1445{
1446 const struct intel_pt_state *state = ptq->state;
1447 struct intel_pt *pt = ptq->pt;
1448 int err;
1449
1450 if (!ptq->have_sample)
1451 return 0;
1452
1453 ptq->have_sample = false;
1454
Adrian Hunter37973072017-06-30 11:36:45 +03001455 if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) {
1456 if (state->type & INTEL_PT_CBR_CHG) {
1457 err = intel_pt_synth_cbr_sample(ptq);
1458 if (err)
1459 return err;
1460 }
1461 if (state->type & INTEL_PT_MWAIT_OP) {
1462 err = intel_pt_synth_mwait_sample(ptq);
1463 if (err)
1464 return err;
1465 }
1466 if (state->type & INTEL_PT_PWR_ENTRY) {
1467 err = intel_pt_synth_pwre_sample(ptq);
1468 if (err)
1469 return err;
1470 }
1471 if (state->type & INTEL_PT_EX_STOP) {
1472 err = intel_pt_synth_exstop_sample(ptq);
1473 if (err)
1474 return err;
1475 }
1476 if (state->type & INTEL_PT_PWR_EXIT) {
1477 err = intel_pt_synth_pwrx_sample(ptq);
1478 if (err)
1479 return err;
1480 }
1481 }
1482
Adrian Hunter406a1802017-05-26 11:17:29 +03001483 if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
Adrian Hunter90e457f2015-07-17 19:33:41 +03001484 err = intel_pt_synth_instruction_sample(ptq);
1485 if (err)
1486 return err;
1487 }
1488
Adrian Hunter406a1802017-05-26 11:17:29 +03001489 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
Adrian Hunter90e457f2015-07-17 19:33:41 +03001490 err = intel_pt_synth_transaction_sample(ptq);
1491 if (err)
1492 return err;
1493 }
1494
Adrian Hunter37973072017-06-30 11:36:45 +03001495 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
1496 err = intel_pt_synth_ptwrite_sample(ptq);
1497 if (err)
1498 return err;
1499 }
1500
Adrian Hunter90e457f2015-07-17 19:33:41 +03001501 if (!(state->type & INTEL_PT_BRANCH))
1502 return 0;
1503
Adrian Hunter50f736372016-06-23 16:40:57 +03001504 if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
Adrian Hunter90e457f2015-07-17 19:33:41 +03001505 thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
1506 state->to_ip, ptq->insn_len,
1507 state->trace_nr);
1508 else
1509 thread_stack__set_trace_nr(ptq->thread, state->trace_nr);
1510
1511 if (pt->sample_branches) {
1512 err = intel_pt_synth_branch_sample(ptq);
1513 if (err)
1514 return err;
1515 }
1516
Adrian Hunterf14445e2015-09-25 16:15:45 +03001517 if (pt->synth_opts.last_branch)
1518 intel_pt_update_last_branch_rb(ptq);
1519
Adrian Hunter63d8e382018-03-07 16:02:22 +02001520 if (!ptq->sync_switch)
Adrian Hunter90e457f2015-07-17 19:33:41 +03001521 return 0;
1522
1523 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
1524 switch (ptq->switch_state) {
1525 case INTEL_PT_SS_UNKNOWN:
1526 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
1527 err = intel_pt_next_tid(pt, ptq);
1528 if (err)
1529 return err;
1530 ptq->switch_state = INTEL_PT_SS_TRACING;
1531 break;
1532 default:
1533 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
1534 return 1;
1535 }
1536 } else if (!state->to_ip) {
1537 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
1538 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
1539 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
1540 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
1541 state->to_ip == pt->ptss_ip &&
1542 (ptq->flags & PERF_IP_FLAG_CALL)) {
1543 ptq->switch_state = INTEL_PT_SS_TRACING;
1544 }
1545
1546 return 0;
1547}
1548
Adrian Hunter86c27862015-08-13 12:40:57 +03001549static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
Adrian Hunter90e457f2015-07-17 19:33:41 +03001550{
Adrian Hunter86c27862015-08-13 12:40:57 +03001551 struct machine *machine = pt->machine;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001552 struct map *map;
1553 struct symbol *sym, *start;
1554 u64 ip, switch_ip = 0;
Adrian Hunter86c27862015-08-13 12:40:57 +03001555 const char *ptss;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001556
1557 if (ptss_ip)
1558 *ptss_ip = 0;
1559
Arnaldo Carvalho de Meloa5e813c2015-09-30 11:54:04 -03001560 map = machine__kernel_map(machine);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001561 if (!map)
1562 return 0;
1563
Arnaldo Carvalho de Melobe39db92016-09-01 19:25:52 -03001564 if (map__load(map))
Adrian Hunter90e457f2015-07-17 19:33:41 +03001565 return 0;
1566
1567 start = dso__first_symbol(map->dso, MAP__FUNCTION);
1568
1569 for (sym = start; sym; sym = dso__next_symbol(sym)) {
1570 if (sym->binding == STB_GLOBAL &&
1571 !strcmp(sym->name, "__switch_to")) {
1572 ip = map->unmap_ip(map, sym->start);
1573 if (ip >= map->start && ip < map->end) {
1574 switch_ip = ip;
1575 break;
1576 }
1577 }
1578 }
1579
1580 if (!switch_ip || !ptss_ip)
1581 return 0;
1582
Adrian Hunter86c27862015-08-13 12:40:57 +03001583 if (pt->have_sched_switch == 1)
1584 ptss = "perf_trace_sched_switch";
1585 else
1586 ptss = "__perf_event_task_sched_out";
1587
Adrian Hunter90e457f2015-07-17 19:33:41 +03001588 for (sym = start; sym; sym = dso__next_symbol(sym)) {
Adrian Hunter86c27862015-08-13 12:40:57 +03001589 if (!strcmp(sym->name, ptss)) {
Adrian Hunter90e457f2015-07-17 19:33:41 +03001590 ip = map->unmap_ip(map, sym->start);
1591 if (ip >= map->start && ip < map->end) {
1592 *ptss_ip = ip;
1593 break;
1594 }
1595 }
1596 }
1597
1598 return switch_ip;
1599}
1600
Adrian Hunter63d8e382018-03-07 16:02:22 +02001601static void intel_pt_enable_sync_switch(struct intel_pt *pt)
1602{
1603 unsigned int i;
1604
1605 pt->sync_switch = true;
1606
1607 for (i = 0; i < pt->queues.nr_queues; i++) {
1608 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
1609 struct intel_pt_queue *ptq = queue->priv;
1610
1611 if (ptq)
1612 ptq->sync_switch = true;
1613 }
1614}
1615
Adrian Hunter90e457f2015-07-17 19:33:41 +03001616static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
1617{
1618 const struct intel_pt_state *state = ptq->state;
1619 struct intel_pt *pt = ptq->pt;
1620 int err;
1621
1622 if (!pt->kernel_start) {
1623 pt->kernel_start = machine__kernel_start(pt->machine);
Adrian Hunter86c27862015-08-13 12:40:57 +03001624 if (pt->per_cpu_mmaps &&
1625 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
Adrian Hunter90e457f2015-07-17 19:33:41 +03001626 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
1627 !pt->sampling_mode) {
Adrian Hunter86c27862015-08-13 12:40:57 +03001628 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001629 if (pt->switch_ip) {
1630 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
1631 pt->switch_ip, pt->ptss_ip);
Adrian Hunter63d8e382018-03-07 16:02:22 +02001632 intel_pt_enable_sync_switch(pt);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001633 }
1634 }
1635 }
1636
1637 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1638 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1639 while (1) {
1640 err = intel_pt_sample(ptq);
1641 if (err)
1642 return err;
1643
1644 state = intel_pt_decode(ptq->decoder);
1645 if (state->err) {
1646 if (state->err == INTEL_PT_ERR_NODATA)
1647 return 1;
Adrian Hunter63d8e382018-03-07 16:02:22 +02001648 if (ptq->sync_switch &&
Adrian Hunter90e457f2015-07-17 19:33:41 +03001649 state->from_ip >= pt->kernel_start) {
Adrian Hunter63d8e382018-03-07 16:02:22 +02001650 ptq->sync_switch = false;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001651 intel_pt_next_tid(pt, ptq);
1652 }
1653 if (pt->synth_opts.errors) {
1654 err = intel_pt_synth_error(pt, state->err,
1655 ptq->cpu, ptq->pid,
1656 ptq->tid,
1657 state->from_ip);
1658 if (err)
1659 return err;
1660 }
1661 continue;
1662 }
1663
1664 ptq->state = state;
1665 ptq->have_sample = true;
1666 intel_pt_sample_flags(ptq);
1667
1668 /* Use estimated TSC upon return to user space */
1669 if (pt->est_tsc &&
1670 (state->from_ip >= pt->kernel_start || !state->from_ip) &&
1671 state->to_ip && state->to_ip < pt->kernel_start) {
1672 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
1673 state->timestamp, state->est_timestamp);
1674 ptq->timestamp = state->est_timestamp;
1675 /* Use estimated TSC in unknown switch state */
Adrian Hunter63d8e382018-03-07 16:02:22 +02001676 } else if (ptq->sync_switch &&
Adrian Hunter90e457f2015-07-17 19:33:41 +03001677 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
1678 intel_pt_is_switch_ip(ptq, state->to_ip) &&
1679 ptq->next_tid == -1) {
1680 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
1681 state->timestamp, state->est_timestamp);
1682 ptq->timestamp = state->est_timestamp;
1683 } else if (state->timestamp > ptq->timestamp) {
1684 ptq->timestamp = state->timestamp;
1685 }
1686
1687 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
1688 *timestamp = ptq->timestamp;
1689 return 0;
1690 }
1691 }
1692 return 0;
1693}
1694
1695static inline int intel_pt_update_queues(struct intel_pt *pt)
1696{
1697 if (pt->queues.new_data) {
1698 pt->queues.new_data = false;
1699 return intel_pt_setup_queues(pt);
1700 }
1701 return 0;
1702}
1703
/*
 * Decode all queues in timestamp order up to 'timestamp', using a min-heap
 * keyed on each queue's next timestamp to interleave them correctly.
 */
static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct intel_pt_queue *ptq;

		if (!pt->heap.heap_cnt)
			return 0;

		/* The earliest queue is already beyond the target time */
		if (pt->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = pt->heap.heap_array[0].queue_nr;
		queue = &pt->queues.queue_array[queue_nr];
		ptq = queue->priv;

		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
			     queue_nr, pt->heap.heap_array[0].ordinal,
			     timestamp);

		auxtrace_heap__pop(&pt->heap);

		/* Decode only until the next queue's turn (or the target) */
		if (pt->heap.heap_cnt) {
			ts = pt->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		intel_pt_set_pid_tid_cpu(pt, queue);

		ret = intel_pt_run_decoder(ptq, &ts);

		if (ret < 0) {
			/* Re-add so the queue is not lost on error */
			auxtrace_heap__add(&pt->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			/* ret == 1: queue has no more data */
			ptq->on_heap = false;
		}
	}

	return 0;
}
1758
/*
 * For timeless decoding: run the decoder to completion on every queue
 * matching 'tid' (or all queues when tid == -1), stamping samples with
 * 'time_'.
 */
static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}
1778
/* Report lost (truncated) trace data as a synthesized error event */
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0);
}
1784
/*
 * Find the queue for a given cpu.  Queues are typically per-cpu in cpu
 * order, so start the search at index 'cpu' (clamped), then scan downwards
 * and finally upwards through the remainder.  Returns NULL if not found.
 */
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	/* Best-guess starting index: the cpu number itself */
	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	/* Scan downwards from the starting index */
	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	/* Scan whatever was not covered by the downward pass */
	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}
1812
Adrian Hunter86c27862015-08-13 12:40:57 +03001813static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
1814 u64 timestamp)
Adrian Hunter90e457f2015-07-17 19:33:41 +03001815{
1816 struct intel_pt_queue *ptq;
Adrian Hunter86c27862015-08-13 12:40:57 +03001817 int err;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001818
1819 if (!pt->sync_switch)
Adrian Hunter86c27862015-08-13 12:40:57 +03001820 return 1;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001821
1822 ptq = intel_pt_cpu_to_ptq(pt, cpu);
Adrian Hunter63d8e382018-03-07 16:02:22 +02001823 if (!ptq || !ptq->sync_switch)
Adrian Hunter86c27862015-08-13 12:40:57 +03001824 return 1;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001825
1826 switch (ptq->switch_state) {
1827 case INTEL_PT_SS_NOT_TRACING:
1828 ptq->next_tid = -1;
1829 break;
1830 case INTEL_PT_SS_UNKNOWN:
1831 case INTEL_PT_SS_TRACING:
1832 ptq->next_tid = tid;
1833 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
1834 return 0;
1835 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
1836 if (!ptq->on_heap) {
Adrian Hunter86c27862015-08-13 12:40:57 +03001837 ptq->timestamp = perf_time_to_tsc(timestamp,
Adrian Hunter90e457f2015-07-17 19:33:41 +03001838 &pt->tc);
1839 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
1840 ptq->timestamp);
1841 if (err)
1842 return err;
1843 ptq->on_heap = true;
1844 }
1845 ptq->switch_state = INTEL_PT_SS_TRACING;
1846 break;
1847 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
1848 ptq->next_tid = tid;
1849 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
1850 break;
1851 default:
1852 break;
1853 }
Adrian Hunter86c27862015-08-13 12:40:57 +03001854
1855 return 1;
1856}
1857
/*
 * Handle a sched_switch tracepoint sample: extract the incoming tid and
 * either defer the switch to the state machine or apply it now.
 */
static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct perf_evsel *evsel;
	pid_t tid;
	int cpu, ret;

	/* Only the designated switch event is of interest */
	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
	if (evsel != pt->switch_evsel)
		return 0;

	tid = perf_evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
							      &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}
1882
Adrian Hunter86c27862015-08-13 12:40:57 +03001883static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
1884 struct perf_sample *sample)
1885{
1886 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1887 pid_t pid, tid;
1888 int cpu, ret;
1889
1890 cpu = sample->cpu;
1891
1892 if (pt->have_sched_switch == 3) {
1893 if (!out)
1894 return 0;
1895 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
1896 pr_err("Expecting CPU-wide context switch event\n");
1897 return -EINVAL;
1898 }
1899 pid = event->context_switch.next_prev_pid;
1900 tid = event->context_switch.next_prev_tid;
1901 } else {
1902 if (out)
1903 return 0;
1904 pid = sample->pid;
1905 tid = sample->tid;
1906 }
1907
1908 if (tid == -1) {
1909 pr_err("context_switch event has no tid\n");
1910 return -EINVAL;
1911 }
1912
1913 intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
1914 cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
1915 &pt->tc));
1916
1917 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
1918 if (ret <= 0)
1919 return ret;
1920
1921 return machine__set_current_tid(pt->machine, cpu, pid, tid);
1922}
1923
Adrian Hunter90e457f2015-07-17 19:33:41 +03001924static int intel_pt_process_itrace_start(struct intel_pt *pt,
1925 union perf_event *event,
1926 struct perf_sample *sample)
1927{
1928 if (!pt->per_cpu_mmaps)
1929 return 0;
1930
1931 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
1932 sample->cpu, event->itrace_start.pid,
1933 event->itrace_start.tid, sample->time,
1934 perf_time_to_tsc(sample->time, &pt->tc));
1935
1936 return machine__set_current_tid(pt->machine, sample->cpu,
1937 event->itrace_start.pid,
1938 event->itrace_start.tid);
1939}
1940
/*
 * Main per-event entry point: drive decoding up to the event's timestamp,
 * then dispatch events that affect decoder state (exit, AUX truncation,
 * sched switch, itrace start, context switch).
 */
static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	/* (u64)-1 means "no timestamp" */
	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	/* Decode trace data up to this event's time */
	if (pt->timeless_decoding) {
		if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
		     perf_event__name(event->header.type), event->header.type,
		     sample->cpu, sample->time, timestamp);

	return err;
}
2004
/*
 * Flush at end of session: decode all remaining trace data on every queue.
 */
static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	/* Run decoding to the end of time */
	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}
2027
/*
 * Free all per-queue decoder state and the auxtrace queues themselves.
 */
static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}
2042
/*
 * Tear down all Intel PT state attached to the session.  Order matters:
 * the heap and queue state are released before the struct itself, and
 * session->auxtrace is cleared so nothing dereferences it afterwards.
 */
static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	/* Drop the reference taken by thread__new() at init time */
	thread__put(pt->unknown_thread);
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
}
2056
2057static int intel_pt_process_auxtrace_event(struct perf_session *session,
2058 union perf_event *event,
2059 struct perf_tool *tool __maybe_unused)
2060{
2061 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2062 auxtrace);
2063
2064 if (pt->sampling_mode)
2065 return 0;
2066
2067 if (!pt->data_queued) {
2068 struct auxtrace_buffer *buffer;
2069 off_t data_offset;
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002070 int fd = perf_data__fd(session->data);
Adrian Hunter90e457f2015-07-17 19:33:41 +03002071 int err;
2072
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01002073 if (perf_data__is_pipe(session->data)) {
Adrian Hunter90e457f2015-07-17 19:33:41 +03002074 data_offset = 0;
2075 } else {
2076 data_offset = lseek(fd, 0, SEEK_CUR);
2077 if (data_offset == -1)
2078 return -errno;
2079 }
2080
2081 err = auxtrace_queues__add_event(&pt->queues, session, event,
2082 data_offset, &buffer);
2083 if (err)
2084 return err;
2085
2086 /* Dump here now we have copied a piped trace out of the pipe */
2087 if (dump_trace) {
2088 if (auxtrace_buffer__get_data(buffer, fd)) {
2089 intel_pt_dump_event(pt, buffer->data,
2090 buffer->size);
2091 auxtrace_buffer__put_data(buffer);
2092 }
2093 }
2094 }
2095
2096 return 0;
2097}
2098
/* Context used while synthesizing attribute events for the session */
struct intel_pt_synth {
	struct perf_tool dummy_tool;	/* tool handed to perf_event__synthesize_attr() */
	struct perf_session *session;	/* session the synthesized event is delivered to */
};
2103
2104static int intel_pt_event_synth(struct perf_tool *tool,
2105 union perf_event *event,
2106 struct perf_sample *sample __maybe_unused,
2107 struct machine *machine __maybe_unused)
2108{
2109 struct intel_pt_synth *intel_pt_synth =
2110 container_of(tool, struct intel_pt_synth, dummy_tool);
2111
2112 return perf_session__deliver_synth_event(intel_pt_synth->session, event,
2113 NULL);
2114}
2115
Adrian Hunter63a22cd2017-05-26 11:17:31 +03002116static int intel_pt_synth_event(struct perf_session *session, const char *name,
Adrian Hunter90e457f2015-07-17 19:33:41 +03002117 struct perf_event_attr *attr, u64 id)
2118{
2119 struct intel_pt_synth intel_pt_synth;
Adrian Hunter63a22cd2017-05-26 11:17:31 +03002120 int err;
2121
2122 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
2123 name, id, (u64)attr->sample_type);
Adrian Hunter90e457f2015-07-17 19:33:41 +03002124
2125 memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
2126 intel_pt_synth.session = session;
2127
Adrian Hunter63a22cd2017-05-26 11:17:31 +03002128 err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
2129 &id, intel_pt_event_synth);
2130 if (err)
2131 pr_err("%s: failed to synthesize '%s' event type\n",
2132 __func__, name);
2133
2134 return err;
Adrian Hunter90e457f2015-07-17 19:33:41 +03002135}
2136
Adrian Hunterbbac88ed2017-05-26 11:17:32 +03002137static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id,
2138 const char *name)
2139{
2140 struct perf_evsel *evsel;
2141
2142 evlist__for_each_entry(evlist, evsel) {
2143 if (evsel->id && evsel->id[0] == id) {
2144 if (evsel->name)
2145 zfree(&evsel->name);
2146 evsel->name = strdup(name);
2147 break;
2148 }
2149 }
2150}
2151
Adrian Hunter85a564d2017-05-26 11:17:30 +03002152static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt,
2153 struct perf_evlist *evlist)
2154{
2155 struct perf_evsel *evsel;
2156
2157 evlist__for_each_entry(evlist, evsel) {
2158 if (evsel->attr.type == pt->pmu_type && evsel->ids)
2159 return evsel;
2160 }
2161
2162 return NULL;
2163}
2164
/*
 * Set up the events to be synthesized from decoded Intel PT data according
 * to pt->synth_opts.  A perf_event_attr template is derived from the Intel
 * PT evsel, then one synthetic attr event per enabled type is delivered to
 * the session, each with a fresh sample id.  The attr is mutated in place
 * between types, so the order of the blocks below matters.
 *
 * Returns 0 on success (including when there is nothing to do), negative
 * error code otherwise.
 */
static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel = intel_pt_evsel(pt, evlist);
	struct perf_event_attr attr;
	u64 id;
	int err;

	if (!evsel) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	/* Base the synthesized attr on the Intel PT event's own attr */
	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	/* Offset ids so synthesized events do not clash with real ones */
	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = intel_pt_synth_event(session, "branches", &attr, id);
		if (err)
			return err;
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
		id += 1;
		/* PERF_SAMPLE_ADDR applies to branch events only */
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (pt->synth_opts.callchain)
		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
	if (pt->synth_opts.last_branch)
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		err = intel_pt_synth_event(session, "instructions", &attr, id);
		if (err)
			return err;
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	/* All remaining synthesized events use a period of 1 */
	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
	attr.sample_period = 1;

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		err = intel_pt_synth_event(session, "transactions", &attr, id);
		if (err)
			return err;
		pt->sample_transactions = true;
		pt->transactions_sample_type = attr.sample_type;
		pt->transactions_id = id;
		intel_pt_set_event_name(evlist, id, "transactions");
		id += 1;
	}

	/* ptwrite and power events carry their payload as raw data */
	attr.type = PERF_TYPE_SYNTH;
	attr.sample_type |= PERF_SAMPLE_RAW;

	if (pt->synth_opts.ptwrites) {
		attr.config = PERF_SYNTH_INTEL_PTWRITE;
		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
		if (err)
			return err;
		pt->sample_ptwrites = true;
		pt->ptwrites_sample_type = attr.sample_type;
		pt->ptwrites_id = id;
		intel_pt_set_event_name(evlist, id, "ptwrite");
		id += 1;
	}

	if (pt->synth_opts.pwr_events) {
		pt->sample_pwr_events = true;
		pt->pwr_events_sample_type = attr.sample_type;

		attr.config = PERF_SYNTH_INTEL_CBR;
		err = intel_pt_synth_event(session, "cbr", &attr, id);
		if (err)
			return err;
		pt->cbr_id = id;
		intel_pt_set_event_name(evlist, id, "cbr");
		id += 1;
	}

	/*
	 * The remaining power events are only available if the trace was
	 * recorded with bit 4 set in the PT config (presumably the power
	 * event enable bit - TODO confirm against the PMU format).
	 */
	if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
		attr.config = PERF_SYNTH_INTEL_MWAIT;
		err = intel_pt_synth_event(session, "mwait", &attr, id);
		if (err)
			return err;
		pt->mwait_id = id;
		intel_pt_set_event_name(evlist, id, "mwait");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRE;
		err = intel_pt_synth_event(session, "pwre", &attr, id);
		if (err)
			return err;
		pt->pwre_id = id;
		intel_pt_set_event_name(evlist, id, "pwre");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_EXSTOP;
		err = intel_pt_synth_event(session, "exstop", &attr, id);
		if (err)
			return err;
		pt->exstop_id = id;
		intel_pt_set_event_name(evlist, id, "exstop");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRX;
		err = intel_pt_synth_event(session, "pwrx", &attr, id);
		if (err)
			return err;
		pt->pwrx_id = id;
		intel_pt_set_event_name(evlist, id, "pwrx");
		id += 1;
	}

	return 0;
}
2317
2318static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
2319{
2320 struct perf_evsel *evsel;
2321
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002322 evlist__for_each_entry_reverse(evlist, evsel) {
Adrian Hunter90e457f2015-07-17 19:33:41 +03002323 const char *name = perf_evsel__name(evsel);
2324
2325 if (!strcmp(name, "sched:sched_switch"))
2326 return evsel;
2327 }
2328
2329 return NULL;
2330}
2331
Adrian Hunter86c27862015-08-13 12:40:57 +03002332static bool intel_pt_find_switch(struct perf_evlist *evlist)
2333{
2334 struct perf_evsel *evsel;
2335
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002336 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter86c27862015-08-13 12:40:57 +03002337 if (evsel->attr.context_switch)
2338 return true;
2339 }
2340
2341 return false;
2342}
2343
Adrian Hunterba11ba62015-09-25 16:15:56 +03002344static int intel_pt_perf_config(const char *var, const char *value, void *data)
2345{
2346 struct intel_pt *pt = data;
2347
2348 if (!strcmp(var, "intel-pt.mispred-all"))
2349 pt->mispred_all = perf_config_bool(var, value);
2350
2351 return 0;
2352}
2353
Adrian Hunter90e457f2015-07-17 19:33:41 +03002354static const char * const intel_pt_info_fmts[] = {
Adrian Hunter11fa7cb2015-07-17 19:33:54 +03002355 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
2356 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
2357 [INTEL_PT_TIME_MULT] = " Time Muliplier %"PRIu64"\n",
2358 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n",
2359 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
2360 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n",
2361 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n",
2362 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n",
2363 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
2364 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
2365 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
2366 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
2367 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
2368 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
Adrian Hunterfa8025c2016-09-23 17:38:42 +03002369 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n",
Adrian Hunter2b9e32c2016-09-23 17:38:46 +03002370 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n",
Adrian Hunter90e457f2015-07-17 19:33:41 +03002371};
2372
2373static void intel_pt_print_info(u64 *arr, int start, int finish)
2374{
2375 int i;
2376
2377 if (!dump_trace)
2378 return;
2379
2380 for (i = start; i <= finish; i++)
2381 fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
2382}
2383
Adrian Hunter2b9e32c2016-09-23 17:38:46 +03002384static void intel_pt_print_info_str(const char *name, const char *str)
2385{
2386 if (!dump_trace)
2387 return;
2388
2389 fprintf(stdout, " %-20s%s\n", name, str ? str : "");
2390}
2391
Adrian Hunter40b746a2016-09-23 17:38:44 +03002392static bool intel_pt_has(struct auxtrace_info_event *auxtrace_info, int pos)
2393{
2394 return auxtrace_info->header.size >=
2395 sizeof(struct auxtrace_info_event) + (sizeof(u64) * (pos + 1));
2396}
2397
/*
 * Handle the PERF_RECORD_AUXTRACE_INFO event: allocate the struct intel_pt,
 * populate it from the parameters recorded at trace time, hook the auxtrace
 * callbacks into the session and set up event synthesis.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * allocated so far is released via the goto cleanup chain at the bottom.
 */
int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	void *info_end;
	u64 *info;
	int err;

	/* Reject events too small to hold the mandatory priv[] entries */
	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	addr_filters__init(&pt->filts);

	err = perf_config(intel_pt_perf_config, pt);
	if (err)
		goto err_free;

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	/* Copy the parameters recorded by 'perf record' */
	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	/* Newer perf versions record more entries; check the header size */
	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
		pt->max_non_turbo_ratio =
			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_MAX_NONTURBO_RATIO,
				    INTEL_PT_MAX_NONTURBO_RATIO);
	}

	/* The address filter string, if any, follows the fixed entries */
	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
	info_end = (void *)info + auxtrace_info->header.size;

	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
		size_t len;

		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

			/* The string is padded to a multiple of 8 bytes */
			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = false;
	pt->est_tsc = !pt->timeless_decoding;

	/* Fallback thread for samples that cannot be attributed */
	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree nor in a
	 * list, initialize its list node so that at thread__put() the
	 * current thread lifetime assumption is kept and we don't segfault
	 * at list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	/* Hook this decoder into the session */
	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

	/* have_sched_switch: 1 = tracepoint, 2 = context_switch attribute */
	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&pt->synth_opts);
		if (use_browser != -1) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
		}
		if (session->itrace_synth_opts)
			pt->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
	}

	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			/* Registration failed: disable callchains again */
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
		}
	}

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
	return err;
}