// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "time-utils.h"

#include "../arch/x86/include/uapi/asm/perf_regs.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

struct range {
	u64 start;
	u64 end;
};

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	bool use_thread_stack;
	bool callstack;
	unsigned int br_stack_sz;
	unsigned int br_stack_sz_plus;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;

	bool sample_pebs;
	struct evsel *pebs_evsel;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;

	struct range *time_ranges;
	unsigned int range_cnt;

	struct ip_callchain *chain;
	struct branch_stack *br_stack;
};

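/*
 * States used by the sync_switch heuristic, which lines decoded trace data
 * up with context-switch events. Interpretation (from the names and their
 * use below): TRACING is the normal decoding state, and the EXPECTING_*
 * states mark a pending context switch for which either the sched_switch
 * event or the kernel's switch IP is still awaited.
 */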
enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u64 sel_timestamp;
	bool sel_start;
	unsigned int sel_idx;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	u64 ipc_insn_cnt;
	u64 ipc_cyc_cnt;
	u64 last_in_insn_cnt;
	u64 last_in_cyc_cnt;
	u64 last_br_insn_cnt;
	u64 last_br_cyc_cnt;
	unsigned int cbr_seen;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}

static void intel_pt_log_event(union perf_event *event)
{
	FILE *f = intel_pt_log_fp();

	if (!intel_pt_enable_logging || !f)
		return;

	perf_event__fprintf(event, f);
}

static void intel_pt_dump_sample(struct perf_session *session,
				 struct perf_sample *sample)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	printf("\n");
	intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
}

static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}
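
/*
 * A note on the overlap fix-up above: in snapshot or sampling mode,
 * consecutive buffers can repeat data already seen. intel_pt_find_overlap()
 * returns the first byte of 'b' that is new relative to 'a', so use_data and
 * use_size trim 'b' down to its non-overlapping tail. When the new data
 * follows on directly from 'a', the buffer is flagged consecutive so the
 * decoder does not treat it as the start of a new trace.
 */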

static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
			       struct auxtrace_buffer *buffer,
			       struct auxtrace_buffer *old_buffer,
			       struct intel_pt_buffer *b)
{
	bool might_overlap;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	return 0;
}

/* Do not drop buffers with references - refer intel_pt_get_trace() */
static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
					   struct auxtrace_buffer *buffer)
{
	if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
		return;

	auxtrace_buffer__drop_data(buffer);
}

/* Must be serialized with respect to intel_pt_get_trace() */
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
			      void *cb_data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err = 0;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	while (1) {
		struct intel_pt_buffer b = { .len = 0 };

		buffer = auxtrace_buffer__next(queue, buffer);
		if (!buffer)
			break;

		err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
		if (err)
			break;

		if (b.len) {
			intel_pt_lookahead_drop_buffer(ptq, old_buffer);
			old_buffer = buffer;
		} else {
			intel_pt_lookahead_drop_buffer(ptq, buffer);
			continue;
		}

		err = cb(&b, cb_data);
		if (err)
			break;
	}

	if (buffer != old_buffer)
		intel_pt_lookahead_drop_buffer(ptq, buffer);
	intel_pt_lookahead_drop_buffer(ptq, old_buffer);

	return err;
}
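
/*
 * Buffer lifetime in the lookahead walk above: the queue's current and old
 * buffers (ptq->buffer and ptq->old_buffer) belong to intel_pt_get_trace()
 * and must survive, which is why intel_pt_lookahead_drop_buffer() refuses to
 * drop them. Every other buffer is dropped as soon as it is no longer needed
 * as the previous buffer for overlap fix-up.
 */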

/*
 * This function assumes data is processed sequentially only.
 * Must be serialized with respect to intel_pt_lookahead()
 */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
	if (err)
		return err;

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}

struct intel_pt_cache_entry {
	struct auxtrace_cache_entry entry;
	u64 insn_cnt;
	u64 byte_cnt;
	enum intel_pt_insn_op op;
	enum intel_pt_insn_branch branch;
	int length;
	int32_t rel;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}

static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}
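
/*
 * Worked example of the sizing above, assuming the default divisor of 64:
 * a 16 MiB DSO gives 16 MiB / 64 = 256 KiB = 2^18, and
 * 32 - __builtin_clz(1 << 18) = 19, i.e. a 2^19-entry cache. Small DSOs
 * (scaled size < 1000) get 10 bits; very large ones are capped at 21 bits.
 */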

static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
				      u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return;

	auxtrace_cache__remove(dso->auxtrace_cache, offset);
}

static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
{
	return ip >= pt->kernel_start ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}

static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;

	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	cpumode = intel_pt_cpumode(ptq->pt, *ip);

	thread = ptq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = ptq->pt->unknown_thread;
	}

	while (1) {
		if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn,
				       INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip)
				goto out_no_cache;

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}

static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter = false;
	bool hit_tracestop = false;
	bool hit_filter = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}

static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	if (ip >= ptq->pt->kernel_start)
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
		return -EINVAL;

	offset = al.map->map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
				     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}

static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & 1) && !(config & 0x2000))
			return false;
	}
	return true;
}

static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}
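
/*
 * Illustration of the shift computation above: mtc_freq_bits is the mask of
 * the MTC frequency field within the PT config word. If the mask were, say,
 * 0x3c00 (purely an example value; the real mask comes from the PMU
 * capabilities), the loop would count 10 trailing zero bits, so the field
 * would be extracted as (config & 0x3c00) >> 10.
 */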

static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}
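
/*
 * In other words, decoding is "timeless" (samples are not ordered by
 * timestamp) unless TSC packets were enabled on the PT event, TSC-to-perf
 * time conversion (cap_user_time_zero) is available, and every event in the
 * session samples PERF_SAMPLE_TIME.
 */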

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

static bool intel_pt_sampling_mode(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
		    evsel->core.attr.aux_sample_size)
			return true;
	}
	return false;
}

static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
	       pt->tc.time_mult;
}
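
/*
 * The quotient/remainder split above computes the inverse conversion
 * ticks = (ns << time_shift) / time_mult without forming ns << time_shift
 * directly, which could overflow 64 bits for large ns: writing ns as
 * quot * time_mult + rem keeps both intermediate products small, since
 * rem < time_mult.
 */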
887
Adrian Hunter2855c052020-04-01 13:16:08 +0300888static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
889{
890 size_t sz = sizeof(struct ip_callchain);
891
892 /* Add 1 to callchain_sz for callchain context */
893 sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
894 return zalloc(sz);
895}
896
897static int intel_pt_callchain_init(struct intel_pt *pt)
898{
899 struct evsel *evsel;
900
901 evlist__for_each_entry(pt->session->evlist, evsel) {
902 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
903 evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
904 }
905
906 pt->chain = intel_pt_alloc_chain(pt);
907 if (!pt->chain)
908 return -ENOMEM;
909
910 return 0;
911}
912
913static void intel_pt_add_callchain(struct intel_pt *pt,
914 struct perf_sample *sample)
915{
916 struct thread *thread = machine__findnew_thread(pt->machine,
917 sample->pid,
918 sample->tid);
919
920 thread_stack__sample_late(thread, sample->cpu, pt->chain,
921 pt->synth_opts.callchain_sz + 1, sample->ip,
922 pt->kernel_start);
923
924 sample->callchain = pt->chain;
925}
926
Adrian Hunter961224d2020-05-16 15:35:48 +0300927static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
Adrian Hunterf0a0251c2020-04-29 18:07:49 +0300928{
929 size_t sz = sizeof(struct branch_stack);
930
Adrian Hunter961224d2020-05-16 15:35:48 +0300931 sz += entry_cnt * sizeof(struct branch_entry);
Adrian Hunterf0a0251c2020-04-29 18:07:49 +0300932 return zalloc(sz);
933}
934
935static int intel_pt_br_stack_init(struct intel_pt *pt)
936{
937 struct evsel *evsel;
938
939 evlist__for_each_entry(pt->session->evlist, evsel) {
940 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
941 evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
942 }
943
Adrian Hunter961224d2020-05-16 15:35:48 +0300944 pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
Adrian Hunterf0a0251c2020-04-29 18:07:49 +0300945 if (!pt->br_stack)
946 return -ENOMEM;
947
948 return 0;
949}
950
951static void intel_pt_add_br_stack(struct intel_pt *pt,
952 struct perf_sample *sample)
953{
954 struct thread *thread = machine__findnew_thread(pt->machine,
955 sample->pid,
956 sample->tid);
957
958 thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
959 pt->br_stack_sz, sample->ip,
960 pt->kernel_start);
961
962 sample->branch_stack = pt->br_stack;
963}
964
Adrian Hunter961224d2020-05-16 15:35:48 +0300965/* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
966#define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
967
Adrian Hunter90e457f2015-07-17 19:33:41 +0300968static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
969 unsigned int queue_nr)
970{
971 struct intel_pt_params params = { .get_trace = 0, };
Adrian Hunter9fb52332018-05-31 13:23:45 +0300972 struct perf_env *env = pt->machine->env;
Adrian Hunter90e457f2015-07-17 19:33:41 +0300973 struct intel_pt_queue *ptq;
974
975 ptq = zalloc(sizeof(struct intel_pt_queue));
976 if (!ptq)
977 return NULL;
978
979 if (pt->synth_opts.callchain) {
Adrian Hunter2855c052020-04-01 13:16:08 +0300980 ptq->chain = intel_pt_alloc_chain(pt);
Adrian Hunter90e457f2015-07-17 19:33:41 +0300981 if (!ptq->chain)
982 goto out_free;
983 }
984
Adrian Hunter961224d2020-05-16 15:35:48 +0300985 if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
986 unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);
987
988 ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
Adrian Hunterf14445e2015-09-25 16:15:45 +0300989 if (!ptq->last_branch)
990 goto out_free;
Adrian Hunterf14445e2015-09-25 16:15:45 +0300991 }
992
Adrian Hunter90e457f2015-07-17 19:33:41 +0300993 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
994 if (!ptq->event_buf)
995 goto out_free;
996
997 ptq->pt = pt;
998 ptq->queue_nr = queue_nr;
999 ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
1000 ptq->pid = -1;
1001 ptq->tid = -1;
1002 ptq->cpu = -1;
1003 ptq->next_tid = -1;
1004
1005 params.get_trace = intel_pt_get_trace;
1006 params.walk_insn = intel_pt_walk_next_insn;
Adrian Hunterda9000a2019-06-04 16:00:08 +03001007 params.lookahead = intel_pt_lookahead;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001008 params.data = ptq;
1009 params.return_compression = intel_pt_return_compression(pt);
Adrian Hunter83959812017-05-26 11:17:11 +03001010 params.branch_enable = intel_pt_branch_enable(pt);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001011 params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
Adrian Hunter11fa7cb2015-07-17 19:33:54 +03001012 params.mtc_period = intel_pt_mtc_period(pt);
1013 params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
1014 params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001015
Adrian Hunter2acee102016-09-23 17:38:48 +03001016 if (pt->filts.cnt > 0)
1017 params.pgd_ip = intel_pt_pgd_ip;
1018
Adrian Hunter90e457f2015-07-17 19:33:41 +03001019 if (pt->synth_opts.instructions) {
1020 if (pt->synth_opts.period) {
1021 switch (pt->synth_opts.period_type) {
1022 case PERF_ITRACE_PERIOD_INSTRUCTIONS:
1023 params.period_type =
1024 INTEL_PT_PERIOD_INSTRUCTIONS;
1025 params.period = pt->synth_opts.period;
1026 break;
1027 case PERF_ITRACE_PERIOD_TICKS:
1028 params.period_type = INTEL_PT_PERIOD_TICKS;
1029 params.period = pt->synth_opts.period;
1030 break;
1031 case PERF_ITRACE_PERIOD_NANOSECS:
1032 params.period_type = INTEL_PT_PERIOD_TICKS;
1033 params.period = intel_pt_ns_to_ticks(pt,
1034 pt->synth_opts.period);
1035 break;
1036 default:
1037 break;
1038 }
1039 }
1040
1041 if (!params.period) {
1042 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
Adrian Huntere1791342015-09-25 16:15:32 +03001043 params.period = 1;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001044 }
1045 }
1046
Adrian Hunter9fb52332018-05-31 13:23:45 +03001047 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
1048 params.flags |= INTEL_PT_FUP_WITH_NLIP;
1049
Adrian Hunter90e457f2015-07-17 19:33:41 +03001050 ptq->decoder = intel_pt_decoder_new(&params);
1051 if (!ptq->decoder)
1052 goto out_free;
1053
1054 return ptq;
1055
1056out_free:
1057 zfree(&ptq->event_buf);
Adrian Hunterf14445e2015-09-25 16:15:45 +03001058 zfree(&ptq->last_branch);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001059 zfree(&ptq->chain);
1060 free(ptq);
1061 return NULL;
1062}
1063
1064static void intel_pt_free_queue(void *priv)
1065{
1066 struct intel_pt_queue *ptq = priv;
1067
1068 if (!ptq)
1069 return;
1070 thread__zput(ptq->thread);
1071 intel_pt_decoder_free(ptq->decoder);
1072 zfree(&ptq->event_buf);
Adrian Hunterf14445e2015-09-25 16:15:45 +03001073 zfree(&ptq->last_branch);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001074 zfree(&ptq->chain);
1075 free(ptq);
1076}
1077
1078static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
1079 struct auxtrace_queue *queue)
1080{
1081 struct intel_pt_queue *ptq = queue->priv;
1082
1083 if (queue->tid == -1 || pt->have_sched_switch) {
1084 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
1085 thread__zput(ptq->thread);
1086 }
1087
1088 if (!ptq->thread && ptq->tid != -1)
1089 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
1090
1091 if (ptq->thread) {
1092 ptq->pid = ptq->thread->pid_;
1093 if (queue->cpu == -1)
1094 ptq->cpu = ptq->thread->cpu;
1095 }
1096}
1097
1098static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
1099{
1100 if (ptq->state->flags & INTEL_PT_ABORT_TX) {
1101 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
1102 } else if (ptq->state->flags & INTEL_PT_ASYNC) {
1103 if (ptq->state->to_ip)
1104 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1105 PERF_IP_FLAG_ASYNC |
1106 PERF_IP_FLAG_INTERRUPT;
1107 else
1108 ptq->flags = PERF_IP_FLAG_BRANCH |
1109 PERF_IP_FLAG_TRACE_END;
1110 ptq->insn_len = 0;
1111 } else {
1112 if (ptq->state->from_ip)
1113 ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
1114 else
1115 ptq->flags = PERF_IP_FLAG_BRANCH |
1116 PERF_IP_FLAG_TRACE_BEGIN;
1117 if (ptq->state->flags & INTEL_PT_IN_TX)
1118 ptq->flags |= PERF_IP_FLAG_IN_TX;
1119 ptq->insn_len = ptq->state->insn_len;
Andi Kleenfaaa8762016-10-07 16:42:26 +03001120 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001121 }
Adrian Hunterc6b5da02018-09-20 16:00:47 +03001122
1123 if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
1124 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
1125 if (ptq->state->type & INTEL_PT_TRACE_END)
1126 ptq->flags |= PERF_IP_FLAG_TRACE_END;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001127}
1128
Adrian Hunter2c47db92019-06-04 16:00:09 +03001129static void intel_pt_setup_time_range(struct intel_pt *pt,
1130 struct intel_pt_queue *ptq)
1131{
1132 if (!pt->range_cnt)
1133 return;
1134
1135 ptq->sel_timestamp = pt->time_ranges[0].start;
1136 ptq->sel_idx = 0;
1137
1138 if (ptq->sel_timestamp) {
1139 ptq->sel_start = true;
1140 } else {
1141 ptq->sel_timestamp = pt->time_ranges[0].end;
1142 ptq->sel_start = false;
1143 }
1144}
1145
Adrian Hunter90e457f2015-07-17 19:33:41 +03001146static int intel_pt_setup_queue(struct intel_pt *pt,
1147 struct auxtrace_queue *queue,
1148 unsigned int queue_nr)
1149{
1150 struct intel_pt_queue *ptq = queue->priv;
1151
1152 if (list_empty(&queue->head))
1153 return 0;
1154
1155 if (!ptq) {
1156 ptq = intel_pt_alloc_queue(pt, queue_nr);
1157 if (!ptq)
1158 return -ENOMEM;
1159 queue->priv = ptq;
1160
1161 if (queue->cpu != -1)
1162 ptq->cpu = queue->cpu;
1163 ptq->tid = queue->tid;
1164
Adrian Hunter5fe2cf72019-06-22 12:32:45 +03001165 ptq->cbr_seen = UINT_MAX;
1166
Adrian Hunter1c071c82018-03-07 16:02:26 +02001167 if (pt->sampling_mode && !pt->snapshot_mode &&
1168 pt->timeless_decoding)
1169 ptq->step_through_buffers = true;
Adrian Hunter63d8e382018-03-07 16:02:22 +02001170
1171 ptq->sync_switch = pt->sync_switch;
Adrian Hunter2c47db92019-06-04 16:00:09 +03001172
1173 intel_pt_setup_time_range(pt, ptq);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001174 }
1175
1176 if (!ptq->on_heap &&
Adrian Hunter63d8e382018-03-07 16:02:22 +02001177 (!ptq->sync_switch ||
Adrian Hunter90e457f2015-07-17 19:33:41 +03001178 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
1179 const struct intel_pt_state *state;
1180 int ret;
1181
1182 if (pt->timeless_decoding)
1183 return 0;
1184
1185 intel_pt_log("queue %u getting timestamp\n", queue_nr);
1186 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1187 queue_nr, ptq->cpu, ptq->pid, ptq->tid);
Adrian Hunter2c47db92019-06-04 16:00:09 +03001188
1189 if (ptq->sel_start && ptq->sel_timestamp) {
1190 ret = intel_pt_fast_forward(ptq->decoder,
1191 ptq->sel_timestamp);
1192 if (ret)
1193 return ret;
1194 }
1195
Adrian Hunter90e457f2015-07-17 19:33:41 +03001196 while (1) {
1197 state = intel_pt_decode(ptq->decoder);
1198 if (state->err) {
1199 if (state->err == INTEL_PT_ERR_NODATA) {
1200 intel_pt_log("queue %u has no timestamp\n",
1201 queue_nr);
1202 return 0;
1203 }
1204 continue;
1205 }
1206 if (state->timestamp)
1207 break;
1208 }
1209
1210 ptq->timestamp = state->timestamp;
1211 intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
1212 queue_nr, ptq->timestamp);
1213 ptq->state = state;
1214 ptq->have_sample = true;
Adrian Hunter2c47db92019-06-04 16:00:09 +03001215 if (ptq->sel_start && ptq->sel_timestamp &&
1216 ptq->timestamp < ptq->sel_timestamp)
1217 ptq->have_sample = false;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001218 intel_pt_sample_flags(ptq);
1219 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
1220 if (ret)
1221 return ret;
1222 ptq->on_heap = true;
1223 }
1224
1225 return 0;
1226}
1227
1228static int intel_pt_setup_queues(struct intel_pt *pt)
1229{
1230 unsigned int i;
1231 int ret;
1232
1233 for (i = 0; i < pt->queues.nr_queues; i++) {
1234 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
1235 if (ret)
1236 return ret;
1237 }
1238 return 0;
1239}
1240
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001241static inline bool intel_pt_skip_event(struct intel_pt *pt)
1242{
1243 return pt->synth_opts.initial_skip &&
1244 pt->num_events++ < pt->synth_opts.initial_skip;
1245}
1246
Adrian Hunter5fe2cf72019-06-22 12:32:45 +03001247/*
1248 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
1249 * Also ensure CBR is first non-skipped event by allowing for 4 more samples
1250 * from this decoder state.
1251 */
1252static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
1253{
1254 return pt->synth_opts.initial_skip &&
1255 pt->num_events + 4 < pt->synth_opts.initial_skip;
1256}
1257
Adrian Hunter0dfded32019-06-10 10:27:57 +03001258static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1259 union perf_event *event,
1260 struct perf_sample *sample)
1261{
1262 event->sample.header.type = PERF_RECORD_SAMPLE;
1263 event->sample.header.size = sizeof(struct perf_event_header);
1264
1265 sample->pid = ptq->pid;
1266 sample->tid = ptq->tid;
1267 sample->cpu = ptq->cpu;
1268 sample->insn_len = ptq->insn_len;
1269 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1270}
1271
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001272static void intel_pt_prep_b_sample(struct intel_pt *pt,
1273 struct intel_pt_queue *ptq,
1274 union perf_event *event,
1275 struct perf_sample *sample)
1276{
Adrian Hunter0dfded32019-06-10 10:27:57 +03001277 intel_pt_prep_a_sample(ptq, event, sample);
1278
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001279 if (!pt->timeless_decoding)
1280 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1281
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001282 sample->ip = ptq->state->from_ip;
Adrian Hunter5d4f0ed2018-10-31 11:10:43 +02001283 sample->cpumode = intel_pt_cpumode(pt, sample->ip);
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001284 sample->addr = ptq->state->to_ip;
1285 sample->period = 1;
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001286 sample->flags = ptq->flags;
Adrian Hunter5d4f0ed2018-10-31 11:10:43 +02001287
Adrian Hunter5d4f0ed2018-10-31 11:10:43 +02001288 event->sample.header.misc = sample->cpumode;
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001289}
1290
Adrian Hunter90e457f2015-07-17 19:33:41 +03001291static int intel_pt_inject_event(union perf_event *event,
Adrian Huntera10eb532018-01-16 15:14:50 +02001292 struct perf_sample *sample, u64 type)
Adrian Hunter90e457f2015-07-17 19:33:41 +03001293{
1294 event->header.size = perf_event__sample_event_size(sample, type, 0);
Adrian Hunter936f1f32018-01-16 15:14:52 +02001295 return perf_event__synthesize_sample(event, type, 0, sample);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001296}
1297
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001298static inline int intel_pt_opt_inject(struct intel_pt *pt,
1299 union perf_event *event,
1300 struct perf_sample *sample, u64 type)
1301{
1302 if (!pt->synth_opts.inject)
1303 return 0;
1304
Adrian Huntera10eb532018-01-16 15:14:50 +02001305 return intel_pt_inject_event(event, sample, type);
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001306}
1307
Adrian Huntercf888e02020-04-29 18:07:45 +03001308static int intel_pt_deliver_synth_event(struct intel_pt *pt,
1309 union perf_event *event,
1310 struct perf_sample *sample, u64 type)
Adrian Hunter90e457f2015-07-17 19:33:41 +03001311{
1312 int ret;
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001313
1314 ret = intel_pt_opt_inject(pt, event, sample, type);
1315 if (ret)
1316 return ret;
1317
1318 ret = perf_session__deliver_synth_event(pt->session, event, sample);
1319 if (ret)
1320 pr_err("Intel PT: failed to deliver event, error %d\n", ret);
1321
1322 return ret;
1323}
1324
1325static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1326{
Adrian Hunter90e457f2015-07-17 19:33:41 +03001327 struct intel_pt *pt = ptq->pt;
1328 union perf_event *event = ptq->event_buf;
1329 struct perf_sample sample = { .ip = 0, };
Adrian Hunterf14445e2015-09-25 16:15:45 +03001330 struct dummy_branch_stack {
1331 u64 nr;
Kan Liang42bbabe2020-02-28 08:30:00 -08001332 u64 hw_idx;
Adrian Hunterf14445e2015-09-25 16:15:45 +03001333 struct branch_entry entries;
1334 } dummy_bs;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001335
Adrian Hunter385e3302015-09-25 16:15:44 +03001336 if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1337 return 0;
1338
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001339 if (intel_pt_skip_event(pt))
Andi Kleend1706b32016-03-28 10:45:38 -07001340 return 0;
1341
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001342 intel_pt_prep_b_sample(pt, ptq, event, &sample);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001343
Adrian Hunter90e457f2015-07-17 19:33:41 +03001344 sample.id = ptq->pt->branches_id;
1345 sample.stream_id = ptq->pt->branches_id;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001346
Adrian Hunterf14445e2015-09-25 16:15:45 +03001347 /*
1348 * perf report cannot handle events without a branch stack when using
1349 * SORT_MODE__BRANCH so make a dummy one.
1350 */
1351 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
1352 dummy_bs = (struct dummy_branch_stack){
1353 .nr = 1,
Kan Liang42bbabe2020-02-28 08:30:00 -08001354 .hw_idx = -1ULL,
Adrian Hunterf14445e2015-09-25 16:15:45 +03001355 .entries = {
1356 .from = sample.ip,
1357 .to = sample.addr,
1358 },
1359 };
1360 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1361 }
1362
Adrian Hunter5b1dc0f2019-05-20 14:37:13 +03001363 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1364 if (sample.cyc_cnt) {
1365 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1366 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1367 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
1368 }
1369
Adrian Huntercf888e02020-04-29 18:07:45 +03001370 return intel_pt_deliver_synth_event(pt, event, &sample,
1371 pt->branches_sample_type);
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001372}
1373
1374static void intel_pt_prep_sample(struct intel_pt *pt,
1375 struct intel_pt_queue *ptq,
1376 union perf_event *event,
1377 struct perf_sample *sample)
1378{
1379 intel_pt_prep_b_sample(pt, ptq, event, sample);
1380
1381 if (pt->synth_opts.callchain) {
Adrian Hunter256d92b2018-12-21 14:06:19 +02001382 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
Adrian Hunter24248302018-10-31 11:10:42 +02001383 pt->synth_opts.callchain_sz + 1,
1384 sample->ip, pt->kernel_start);
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001385 sample->callchain = ptq->chain;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001386 }
1387
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001388 if (pt->synth_opts.last_branch) {
Adrian Huntercf888e02020-04-29 18:07:45 +03001389 thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
1390 pt->br_stack_sz);
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001391 sample->branch_stack = ptq->last_branch;
1392 }
1393}
1394
Adrian Hunter90e457f2015-07-17 19:33:41 +03001395static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1396{
Adrian Hunter90e457f2015-07-17 19:33:41 +03001397 struct intel_pt *pt = ptq->pt;
1398 union perf_event *event = ptq->event_buf;
1399 struct perf_sample sample = { .ip = 0, };
1400
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001401 if (intel_pt_skip_event(pt))
Andi Kleend1706b32016-03-28 10:45:38 -07001402 return 0;
1403
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001404 intel_pt_prep_sample(pt, ptq, event, &sample);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001405
Adrian Hunter90e457f2015-07-17 19:33:41 +03001406 sample.id = ptq->pt->instructions_id;
1407 sample.stream_id = ptq->pt->instructions_id;
Adrian Hunter2a21d032015-07-17 19:33:48 +03001408 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001409
Adrian Hunter5b1dc0f2019-05-20 14:37:13 +03001410 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1411 if (sample.cyc_cnt) {
1412 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1413 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1414 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1415 }
1416
Adrian Hunter2a21d032015-07-17 19:33:48 +03001417 ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1418
Adrian Huntercf888e02020-04-29 18:07:45 +03001419 return intel_pt_deliver_synth_event(pt, event, &sample,
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001420 pt->instructions_sample_type);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001421}
1422
1423static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1424{
Adrian Hunter90e457f2015-07-17 19:33:41 +03001425 struct intel_pt *pt = ptq->pt;
1426 union perf_event *event = ptq->event_buf;
1427 struct perf_sample sample = { .ip = 0, };
1428
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001429 if (intel_pt_skip_event(pt))
Andi Kleend1706b32016-03-28 10:45:38 -07001430 return 0;
1431
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001432 intel_pt_prep_sample(pt, ptq, event, &sample);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001433
Adrian Hunter90e457f2015-07-17 19:33:41 +03001434 sample.id = ptq->pt->transactions_id;
1435 sample.stream_id = ptq->pt->transactions_id;
Adrian Hunter90e457f2015-07-17 19:33:41 +03001436
Adrian Huntercf888e02020-04-29 18:07:45 +03001437 return intel_pt_deliver_synth_event(pt, event, &sample,
Adrian Hunter0f3e5372017-05-26 11:17:27 +03001438 pt->transactions_sample_type);
Adrian Hunter90e457f2015-07-17 19:33:41 +03001439}
1440
Adrian Hunter37973072017-06-30 11:36:45 +03001441static void intel_pt_prep_p_sample(struct intel_pt *pt,
1442 struct intel_pt_queue *ptq,
1443 union perf_event *event,
1444 struct perf_sample *sample)
1445{
1446 intel_pt_prep_sample(pt, ptq, event, sample);
1447
1448 /*
1449 * Zero IP is used to mean "trace start" but that is not the case for
1450 * power or PTWRITE events with no IP, so clear the flags.
1451 */
1452 if (!sample->ip)
1453 sample->flags = 0;
1454}
1455
1456static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1457{
1458 struct intel_pt *pt = ptq->pt;
1459 union perf_event *event = ptq->event_buf;
1460 struct perf_sample sample = { .ip = 0, };
1461 struct perf_synth_intel_ptwrite raw;
1462
1463 if (intel_pt_skip_event(pt))
1464 return 0;
1465
1466 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1467
1468 sample.id = ptq->pt->ptwrites_id;
1469 sample.stream_id = ptq->pt->ptwrites_id;
1470
1471 raw.flags = 0;
1472 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1473 raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1474
1475 sample.raw_size = perf_synth__raw_size(raw);
1476 sample.raw_data = perf_synth__raw_data(&raw);
1477
Adrian Huntercf888e02020-04-29 18:07:45 +03001478 return intel_pt_deliver_synth_event(pt, event, &sample,
Adrian Hunter37973072017-06-30 11:36:45 +03001479 pt->ptwrites_sample_type);
1480}
1481
1482static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1483{
1484 struct intel_pt *pt = ptq->pt;
1485 union perf_event *event = ptq->event_buf;
1486 struct perf_sample sample = { .ip = 0, };
1487 struct perf_synth_intel_cbr raw;
1488 u32 flags;
1489
Adrian Hunter5fe2cf72019-06-22 12:32:45 +03001490 if (intel_pt_skip_cbr_event(pt))
Adrian Hunter37973072017-06-30 11:36:45 +03001491 return 0;
1492
Adrian Hunter5fe2cf72019-06-22 12:32:45 +03001493 ptq->cbr_seen = ptq->state->cbr;
1494
Adrian Hunter37973072017-06-30 11:36:45 +03001495 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1496
1497 sample.id = ptq->pt->cbr_id;
1498 sample.stream_id = ptq->pt->cbr_id;
1499
1500 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
1501 raw.flags = cpu_to_le32(flags);
1502 raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
1503 raw.reserved3 = 0;
1504
1505 sample.raw_size = perf_synth__raw_size(raw);
1506 sample.raw_data = perf_synth__raw_data(&raw);
1507
Adrian Huntercf888e02020-04-29 18:07:45 +03001508 return intel_pt_deliver_synth_event(pt, event, &sample,
Adrian Hunter37973072017-06-30 11:36:45 +03001509 pt->pwr_events_sample_type);
1510}
1511
1512static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
1513{
1514 struct intel_pt *pt = ptq->pt;
1515 union perf_event *event = ptq->event_buf;
1516 struct perf_sample sample = { .ip = 0, };
1517 struct perf_synth_intel_mwait raw;
1518
1519 if (intel_pt_skip_event(pt))
1520 return 0;
1521
1522 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1523
1524 sample.id = ptq->pt->mwait_id;
1525 sample.stream_id = ptq->pt->mwait_id;
1526
1527 raw.reserved = 0;
1528 raw.payload = cpu_to_le64(ptq->state->mwait_payload);
1529
1530 sample.raw_size = perf_synth__raw_size(raw);
1531 sample.raw_data = perf_synth__raw_data(&raw);
1532
Adrian Huntercf888e02020-04-29 18:07:45 +03001533 return intel_pt_deliver_synth_event(pt, event, &sample,
Adrian Hunter37973072017-06-30 11:36:45 +03001534 pt->pwr_events_sample_type);
1535}
1536
1537static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
1538{
1539 struct intel_pt *pt = ptq->pt;
1540 union perf_event *event = ptq->event_buf;
1541 struct perf_sample sample = { .ip = 0, };
1542 struct perf_synth_intel_pwre raw;
1543
1544 if (intel_pt_skip_event(pt))
1545 return 0;
1546
1547 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1548
1549 sample.id = ptq->pt->pwre_id;
1550 sample.stream_id = ptq->pt->pwre_id;
1551
1552 raw.reserved = 0;
1553 raw.payload = cpu_to_le64(ptq->state->pwre_payload);
1554
1555 sample.raw_size = perf_synth__raw_size(raw);
1556 sample.raw_data = perf_synth__raw_data(&raw);
1557
Adrian Huntercf888e02020-04-29 18:07:45 +03001558 return intel_pt_deliver_synth_event(pt, event, &sample,
Adrian Hunter37973072017-06-30 11:36:45 +03001559 pt->pwr_events_sample_type);
1560}
1561
1562static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
1563{
1564 struct intel_pt *pt = ptq->pt;
1565 union perf_event *event = ptq->event_buf;
1566 struct perf_sample sample = { .ip = 0, };
1567 struct perf_synth_intel_exstop raw;
1568
1569 if (intel_pt_skip_event(pt))
1570 return 0;
1571
1572 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1573
1574 sample.id = ptq->pt->exstop_id;
1575 sample.stream_id = ptq->pt->exstop_id;
1576
1577 raw.flags = 0;
1578 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1579
1580 sample.raw_size = perf_synth__raw_size(raw);
1581 sample.raw_data = perf_synth__raw_data(&raw);
1582
Adrian Huntercf888e02020-04-29 18:07:45 +03001583 return intel_pt_deliver_synth_event(pt, event, &sample,
Adrian Hunter37973072017-06-30 11:36:45 +03001584 pt->pwr_events_sample_type);
1585}
1586
1587static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
1588{
1589 struct intel_pt *pt = ptq->pt;
1590 union perf_event *event = ptq->event_buf;
1591 struct perf_sample sample = { .ip = 0, };
1592 struct perf_synth_intel_pwrx raw;
1593
1594 if (intel_pt_skip_event(pt))
1595 return 0;
1596
1597 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1598
1599 sample.id = ptq->pt->pwrx_id;
1600 sample.stream_id = ptq->pt->pwrx_id;
1601
1602 raw.reserved = 0;
1603 raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
1604
1605 sample.raw_size = perf_synth__raw_size(raw);
1606 sample.raw_data = perf_synth__raw_data(&raw);
1607
Adrian Huntercf888e02020-04-29 18:07:45 +03001608 return intel_pt_deliver_synth_event(pt, event, &sample,
Adrian Hunter37973072017-06-30 11:36:45 +03001609 pt->pwr_events_sample_type);
1610}
1611
Adrian Hunter9e9a6182019-06-10 10:27:59 +03001612/*
1613 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer
1614 * intel_pt_add_gp_regs().
1615 */
1616static const int pebs_gp_regs[] = {
1617 [PERF_REG_X86_FLAGS] = 1,
1618 [PERF_REG_X86_IP] = 2,
1619 [PERF_REG_X86_AX] = 3,
1620 [PERF_REG_X86_CX] = 4,
1621 [PERF_REG_X86_DX] = 5,
1622 [PERF_REG_X86_BX] = 6,
1623 [PERF_REG_X86_SP] = 7,
1624 [PERF_REG_X86_BP] = 8,
1625 [PERF_REG_X86_SI] = 9,
1626 [PERF_REG_X86_DI] = 10,
1627 [PERF_REG_X86_R8] = 11,
1628 [PERF_REG_X86_R9] = 12,
1629 [PERF_REG_X86_R10] = 13,
1630 [PERF_REG_X86_R11] = 14,
1631 [PERF_REG_X86_R12] = 15,
1632 [PERF_REG_X86_R13] = 16,
1633 [PERF_REG_X86_R14] = 17,
1634 [PERF_REG_X86_R15] = 18,
1635};

static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
				 const struct intel_pt_blk_items *items,
				 u64 regs_mask)
{
	const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
	u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
	u32 bit;
	int i;

	for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
		/* Get the PEBS gp_regs array index */
		int n = pebs_gp_regs[i] - 1;

		if (n < 0)
			continue;
		/*
		 * Add only registers that were requested (i.e. 'regs_mask') and
		 * that were provided (i.e. 'mask'), and update the resulting
		 * mask (i.e. 'intr_regs->mask') accordingly.
		 */
		if (mask & 1 << n && regs_mask & bit) {
			intr_regs->mask |= bit;
			*pos++ = gp_regs[n];
		}
	}

	return pos;
}

#ifndef PERF_REG_X86_XMM0
#define PERF_REG_X86_XMM0 32
#endif

static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
			     const struct intel_pt_blk_items *items,
			     u64 regs_mask)
{
	u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
	const u64 *xmm = items->xmm;

	/*
	 * If there are any XMM registers, then there should be all of them.
	 * Nevertheless, follow the logic to add only registers that were
	 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
	 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
	 */
	intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;

	for (; mask; mask >>= 1, xmm++) {
		if (mask & 1)
			*pos++ = *xmm;
	}
}

#define LBR_INFO_MISPRED	(1ULL << 63)
#define LBR_INFO_IN_TX		(1ULL << 62)
#define LBR_INFO_ABORT		(1ULL << 61)
#define LBR_INFO_CYCLES		0xffff

/* Refer to the kernel's intel_pmu_store_pebs_lbrs() */
static u64 intel_pt_lbr_flags(u64 info)
{
	union {
		struct branch_flags flags;
		u64 result;
	} u;

	u.result = 0;
	u.flags.mispred = !!(info & LBR_INFO_MISPRED);
	u.flags.predicted = !(info & LBR_INFO_MISPRED);
	u.flags.in_tx = !!(info & LBR_INFO_IN_TX);
	u.flags.abort = !!(info & LBR_INFO_ABORT);
	u.flags.cycles = info & LBR_INFO_CYCLES;

	return u.result;
}
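
/*
 * For example, an LBR info word with bit 63 set and 10 in its low 16 bits
 * yields flags with mispred = 1, predicted = 0 and cycles = 10.
 */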

static void intel_pt_add_lbrs(struct branch_stack *br_stack,
			      const struct intel_pt_blk_items *items)
{
	u64 *to;
	int i;

	br_stack->nr = 0;

	to = &br_stack->entries[0].from;

	for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
		u32 mask = items->mask[i];
		const u64 *from = items->val[i];

		for (; mask; mask >>= 3, from += 3) {
			if ((mask & 7) == 7) {
				*to++ = from[0];
				*to++ = from[1];
				*to++ = intel_pt_lbr_flags(from[2]);
				br_stack->nr += 1;
			}
		}
	}
}
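
/*
 * Each bit of a block item mask corresponds to one u64 of data, and the
 * LBR items hold consecutive (from, to, info) triplets, which is why
 * intel_pt_add_lbrs() walks the mask three bits at a time and adds an
 * entry only when a complete triplet ((mask & 7) == 7) is present.
 */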

static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_blk_items *items = &ptq->state->items;
	struct perf_sample sample = { .ip = 0, };
	union perf_event *event = ptq->event_buf;
	struct intel_pt *pt = ptq->pt;
	struct evsel *evsel = pt->pebs_evsel;
	u64 sample_type = evsel->core.attr.sample_type;
	u64 id = evsel->core.id[0];
	u8 cpumode;
	u64 regs[8 * sizeof(sample.intr_regs.mask)];

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_a_sample(ptq, event, &sample);

	sample.id = id;
	sample.stream_id = id;

	if (!evsel->core.attr.freq)
		sample.period = evsel->core.attr.sample_period;

	/* No support for non-zero CS base */
	if (items->has_ip)
		sample.ip = items->ip;
	else if (items->has_rip)
		sample.ip = items->rip;
	else
		sample.ip = ptq->state->from_ip;

	/* No support for guest mode at this time */
	cpumode = sample.ip < ptq->pt->kernel_start ?
		  PERF_RECORD_MISC_USER :
		  PERF_RECORD_MISC_KERNEL;

	event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;

	sample.cpumode = cpumode;

	if (sample_type & PERF_SAMPLE_TIME) {
		u64 timestamp = 0;

		if (items->has_timestamp)
			timestamp = items->timestamp;
		else if (!pt->timeless_decoding)
			timestamp = ptq->timestamp;
		if (timestamp)
			sample.time = tsc_to_perf_time(timestamp, &pt->tc);
	}

	if (sample_type & PERF_SAMPLE_CALLCHAIN &&
	    pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip,
				     pt->kernel_start);
		sample.callchain = ptq->chain;
	}

	if (sample_type & PERF_SAMPLE_REGS_INTR &&
	    (items->mask[INTEL_PT_GP_REGS_POS] ||
	     items->mask[INTEL_PT_XMM_POS])) {
		u64 regs_mask = evsel->core.attr.sample_regs_intr;
		u64 *pos;

		sample.intr_regs.abi = items->is_32_bit ?
				       PERF_SAMPLE_REGS_ABI_32 :
				       PERF_SAMPLE_REGS_ABI_64;
		sample.intr_regs.regs = regs;

		pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);

		intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		if (items->mask[INTEL_PT_LBR_0_POS] ||
		    items->mask[INTEL_PT_LBR_1_POS] ||
		    items->mask[INTEL_PT_LBR_2_POS]) {
			intel_pt_add_lbrs(ptq->last_branch, items);
		} else if (pt->synth_opts.last_branch) {
			thread_stack__br_sample(ptq->thread, ptq->cpu,
						ptq->last_branch,
						pt->br_stack_sz);
		} else {
			ptq->last_branch->nr = 0;
		}
		sample.branch_stack = ptq->last_branch;
	}

	if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
		sample.addr = items->mem_access_address;

	if (sample_type & PERF_SAMPLE_WEIGHT) {
		/*
		 * Refer to the kernel's setup_pebs_adaptive_sample_data() and
		 * intel_hsw_weight().
		 */
		if (items->has_mem_access_latency)
			sample.weight = items->mem_access_latency;
		if (!sample.weight && items->has_tsx_aux_info) {
			/* Cycles last block */
			sample.weight = (u32)items->tsx_aux_info;
		}
	}

	if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
		u64 ax = items->has_rax ? items->rax : 0;
		/* Refer to the kernel's intel_hsw_transaction() */
		u64 txn = (u8)(items->tsx_aux_info >> 32);

		/* For RTM XABORTs also log the abort code from AX */
		if (txn & PERF_TXN_TRANSACTION && ax & 1)
			txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
		sample.transaction = txn;
	}

	return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
}

static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
				pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
	union perf_event event;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	int err;

	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     code, cpu, pid, tid, ip, msg, timestamp);

	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);

	return err;
}

static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
				 const struct intel_pt_state *state)
{
	struct intel_pt *pt = ptq->pt;
	u64 tm = ptq->timestamp;

	tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);

	return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
				    ptq->tid, state->from_ip, tm);
}

static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}

static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
	struct intel_pt *pt = ptq->pt;

	return ip == pt->switch_ip &&
	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}
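
/*
 * Only a direct, synchronous branch to pt->switch_ip (resolved from the
 * kernel's __switch_to symbol by intel_pt_switch_ip() below) counts as a
 * context switch here: conditional, asynchronous, interrupt and TX-abort
 * branches to the same address are ignored.
 */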

#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
			  INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)

static int intel_pt_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!ptq->have_sample)
		return 0;

	ptq->have_sample = false;

	if (ptq->state->tot_cyc_cnt > ptq->ipc_cyc_cnt) {
		/*
		 * Cycle count and instruction count only go together to create
		 * a valid IPC ratio when the cycle count changes.
		 */
		ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
		ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
	}

	/*
	 * Do PEBS first to allow for the possibility that the PEBS timestamp
	 * precedes the current timestamp.
	 */
	if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
		err = intel_pt_synth_pebs_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_pwr_events) {
		if (ptq->state->cbr != ptq->cbr_seen) {
			err = intel_pt_synth_cbr_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_PWR_EVT) {
			if (state->type & INTEL_PT_MWAIT_OP) {
				err = intel_pt_synth_mwait_sample(ptq);
				if (err)
					return err;
			}
			if (state->type & INTEL_PT_PWR_ENTRY) {
				err = intel_pt_synth_pwre_sample(ptq);
				if (err)
					return err;
			}
			if (state->type & INTEL_PT_EX_STOP) {
				err = intel_pt_synth_exstop_sample(ptq);
				if (err)
					return err;
			}
			if (state->type & INTEL_PT_PWR_EXIT) {
				err = intel_pt_synth_pwrx_sample(ptq);
				if (err)
					return err;
			}
		}
	}

	if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
		err = intel_pt_synth_instruction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
		err = intel_pt_synth_transaction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
		err = intel_pt_synth_ptwrite_sample(ptq);
		if (err)
			return err;
	}

	if (!(state->type & INTEL_PT_BRANCH))
		return 0;

	if (pt->use_thread_stack) {
		thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
				    state->from_ip, state->to_ip, ptq->insn_len,
				    state->trace_nr, pt->callstack,
				    pt->br_stack_sz_plus,
				    pt->mispred_all);
	} else {
		thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
	}

	if (pt->sample_branches) {
		err = intel_pt_synth_branch_sample(ptq);
		if (err)
			return err;
	}

	if (!ptq->sync_switch)
		return 0;

	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
		switch (ptq->switch_state) {
		case INTEL_PT_SS_NOT_TRACING:
		case INTEL_PT_SS_UNKNOWN:
		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
			err = intel_pt_next_tid(pt, ptq);
			if (err)
				return err;
			ptq->switch_state = INTEL_PT_SS_TRACING;
			break;
		default:
			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
			return 1;
		}
	} else if (!state->to_ip) {
		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
		   state->to_ip == pt->ptss_ip &&
		   (ptq->flags & PERF_IP_FLAG_CALL)) {
		ptq->switch_state = INTEL_PT_SS_TRACING;
	}

	return 0;
}

static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
	struct machine *machine = pt->machine;
	struct map *map;
	struct symbol *sym, *start;
	u64 ip, switch_ip = 0;
	const char *ptss;

	if (ptss_ip)
		*ptss_ip = 0;

	map = machine__kernel_map(machine);
	if (!map)
		return 0;

	if (map__load(map))
		return 0;

	start = dso__first_symbol(map->dso);

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding == STB_GLOBAL &&
		    !strcmp(sym->name, "__switch_to")) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				switch_ip = ip;
				break;
			}
		}
	}

	if (!switch_ip || !ptss_ip)
		return 0;

	if (pt->have_sched_switch == 1)
		ptss = "perf_trace_sched_switch";
	else
		ptss = "__perf_event_task_sched_out";

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (!strcmp(sym->name, ptss)) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				*ptss_ip = ip;
				break;
			}
		}
	}

	return switch_ip;
}
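
/*
 * Note that the switch IP resolution above depends on kernel symbols being
 * available: "__switch_to" gives the switch IP itself, and either
 * "perf_trace_sched_switch" (have_sched_switch == 1) or
 * "__perf_event_task_sched_out" gives ptss_ip.
 */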

static void intel_pt_enable_sync_switch(struct intel_pt *pt)
{
	unsigned int i;

	pt->sync_switch = true;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq)
			ptq->sync_switch = true;
	}
}

/*
 * To filter against time ranges, it is only necessary to look at the next start
 * or end time.
 */
static bool intel_pt_next_time(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;

	if (ptq->sel_start) {
		/* Next time is an end time */
		ptq->sel_start = false;
		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
		return true;
	} else if (ptq->sel_idx + 1 < pt->range_cnt) {
		/* Next time is a start time */
		ptq->sel_start = true;
		ptq->sel_idx += 1;
		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
		return true;
	}

	/* No next time */
	return false;
}

static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
{
	int err;

	while (1) {
		if (ptq->sel_start) {
			if (ptq->timestamp >= ptq->sel_timestamp) {
				/* After start time, so consider next time */
				intel_pt_next_time(ptq);
				if (!ptq->sel_timestamp) {
					/* No end time */
					return 0;
				}
				/* Check against end time */
				continue;
			}
			/* Before start time, so fast forward */
			ptq->have_sample = false;
			if (ptq->sel_timestamp > *ff_timestamp) {
				if (ptq->sync_switch) {
					intel_pt_next_tid(ptq->pt, ptq);
					ptq->switch_state = INTEL_PT_SS_UNKNOWN;
				}
				*ff_timestamp = ptq->sel_timestamp;
				err = intel_pt_fast_forward(ptq->decoder,
							    ptq->sel_timestamp);
				if (err)
					return err;
			}
			return 0;
		} else if (ptq->timestamp > ptq->sel_timestamp) {
			/* After end time, so consider next time */
			if (!intel_pt_next_time(ptq)) {
				/* No next time range, so stop decoding */
				ptq->have_sample = false;
				ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
				return 1;
			}
			/* Check against next start time */
			continue;
		} else {
			/* Before end time */
			return 0;
		}
	}
}
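
/*
 * Return convention for intel_pt_time_filter(): 0 means carry on decoding
 * (possibly after fast forwarding), 1 means there are no more selected time
 * ranges so decoding of this queue should stop, and negative values are
 * errors from intel_pt_fast_forward().
 */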

static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	u64 ff_timestamp = 0;
	int err;

	if (!pt->kernel_start) {
		pt->kernel_start = machine__kernel_start(pt->machine);
		if (pt->per_cpu_mmaps &&
		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
		    !pt->sampling_mode) {
			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
			if (pt->switch_ip) {
				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
					     pt->switch_ip, pt->ptss_ip);
				intel_pt_enable_sync_switch(pt);
			}
		}
	}

	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
	while (1) {
		err = intel_pt_sample(ptq);
		if (err)
			return err;

		state = intel_pt_decode(ptq->decoder);
		if (state->err) {
			if (state->err == INTEL_PT_ERR_NODATA)
				return 1;
			if (ptq->sync_switch &&
			    state->from_ip >= pt->kernel_start) {
				ptq->sync_switch = false;
				intel_pt_next_tid(pt, ptq);
			}
			if (pt->synth_opts.errors) {
				err = intel_ptq_synth_error(ptq, state);
				if (err)
					return err;
			}
			continue;
		}

		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);

		/* Use estimated TSC upon return to user space */
		if (pt->est_tsc &&
		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
		    state->to_ip && state->to_ip < pt->kernel_start) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		/* Use estimated TSC in unknown switch state */
		} else if (ptq->sync_switch &&
			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
			   ptq->next_tid == -1) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		} else if (state->timestamp > ptq->timestamp) {
			ptq->timestamp = state->timestamp;
		}

		if (ptq->sel_timestamp) {
			err = intel_pt_time_filter(ptq, &ff_timestamp);
			if (err)
				return err;
		}

		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
			*timestamp = ptq->timestamp;
			return 0;
		}
	}
	return 0;
}

static inline int intel_pt_update_queues(struct intel_pt *pt)
{
	if (pt->queues.new_data) {
		pt->queues.new_data = false;
		return intel_pt_setup_queues(pt);
	}
	return 0;
}

static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct intel_pt_queue *ptq;

		if (!pt->heap.heap_cnt)
			return 0;

		if (pt->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = pt->heap.heap_array[0].queue_nr;
		queue = &pt->queues.queue_array[queue_nr];
		ptq = queue->priv;

		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
			     queue_nr, pt->heap.heap_array[0].ordinal,
			     timestamp);

		auxtrace_heap__pop(&pt->heap);

		if (pt->heap.heap_cnt) {
			ts = pt->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		intel_pt_set_pid_tid_cpu(pt, queue);

		ret = intel_pt_run_decoder(ptq, &ts);

		if (ret < 0) {
			auxtrace_heap__add(&pt->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			ptq->on_heap = false;
		}
	}

	return 0;
}

static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}

static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
					    struct auxtrace_queue *queue,
					    struct perf_sample *sample)
{
	struct machine *m = ptq->pt->machine;

	ptq->pid = sample->pid;
	ptq->tid = sample->tid;
	ptq->cpu = queue->cpu;

	intel_pt_log("queue %u cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);

	thread__zput(ptq->thread);

	if (ptq->tid == -1)
		return;

	if (ptq->pid == -1) {
		ptq->thread = machine__find_thread(m, -1, ptq->tid);
		if (ptq->thread)
			ptq->pid = ptq->thread->pid_;
		return;
	}

	ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
}

static int intel_pt_process_timeless_sample(struct intel_pt *pt,
					    struct perf_sample *sample)
{
	struct auxtrace_queue *queue;
	struct intel_pt_queue *ptq;
	u64 ts = 0;

	queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
	if (!queue)
		return -EINVAL;

	ptq = queue->priv;
	if (!ptq)
		return 0;

	ptq->stop = false;
	ptq->time = sample->time;
	intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
	intel_pt_run_decoder(ptq, &ts);
	return 0;
}

static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0, sample->time);
}

static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}
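
/*
 * intel_pt_cpu_to_ptq() relies on queues normally being indexed by cpu:
 * the direct index is tried first, then the array is scanned backwards
 * and finally forwards for a queue whose cpu matches.
 */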

static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
				u64 timestamp)
{
	struct intel_pt_queue *ptq;
	int err;

	if (!pt->sync_switch)
		return 1;

	ptq = intel_pt_cpu_to_ptq(pt, cpu);
	if (!ptq || !ptq->sync_switch)
		return 1;

	switch (ptq->switch_state) {
	case INTEL_PT_SS_NOT_TRACING:
		break;
	case INTEL_PT_SS_UNKNOWN:
	case INTEL_PT_SS_TRACING:
		ptq->next_tid = tid;
		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
		return 0;
	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
		if (!ptq->on_heap) {
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
						 ptq->timestamp);
			if (err)
				return err;
			ptq->on_heap = true;
		}
		ptq->switch_state = INTEL_PT_SS_TRACING;
		break;
	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
		break;
	default:
		break;
	}

	ptq->next_tid = -1;

	return 1;
}

static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct evsel *evsel;
	pid_t tid;
	int cpu, ret;

	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
	if (evsel != pt->switch_evsel)
		return 0;

	tid = evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
							      &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}

static int intel_pt_context_switch_in(struct intel_pt *pt,
				      struct perf_sample *sample)
{
	pid_t pid = sample->pid;
	pid_t tid = sample->tid;
	int cpu = sample->cpu;

	if (pt->sync_switch) {
		struct intel_pt_queue *ptq;

		ptq = intel_pt_cpu_to_ptq(pt, cpu);
		if (ptq && ptq->sync_switch) {
			ptq->next_tid = -1;
			switch (ptq->switch_state) {
			case INTEL_PT_SS_NOT_TRACING:
			case INTEL_PT_SS_UNKNOWN:
			case INTEL_PT_SS_TRACING:
				break;
			case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
			case INTEL_PT_SS_EXPECTING_SWITCH_IP:
				ptq->switch_state = INTEL_PT_SS_TRACING;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * If the current tid has not been updated yet, ensure it is now that
	 * a "switch in" event has occurred.
	 */
	if (machine__get_current_tid(pt->machine, cpu) == tid)
		return 0;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}

static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return intel_pt_context_switch_in(pt, sample);
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1) {
		pr_err("context_switch event has no tid\n");
		return -EINVAL;
	}

	intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
								   &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}

static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}

static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
			     struct addr_location *al)
{
	if (!al->map || addr < al->map->start || addr >= al->map->end) {
		if (!thread__find_map(thread, cpumode, addr, al))
			return -1;
	}

	return 0;
}

/* Invalidate all instruction cache entries that overlap the text poke */
static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
	/* Assume the text poke begins in a basic block no more than 4096 bytes in size */
	int cnt = 4096 + event->text_poke.new_len;
	struct thread *thread = pt->unknown_thread;
	struct addr_location al = { .map = NULL };
	struct machine *machine = pt->machine;
	struct intel_pt_cache_entry *e;
	u64 offset;

	if (!event->text_poke.new_len)
		return 0;

	for (; cnt; cnt--, addr--) {
		if (intel_pt_find_map(thread, cpumode, addr, &al)) {
			if (addr < event->text_poke.addr)
				return 0;
			continue;
		}

		if (!al.map->dso || !al.map->dso->auxtrace_cache)
			continue;

		offset = al.map->map_ip(al.map, addr);

		e = intel_pt_cache_lookup(al.map->dso, machine, offset);
		if (!e)
			continue;

		if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
			/*
			 * No overlap. Working backwards there cannot be another
			 * basic block that overlaps the text poke if there is a
			 * branch instruction before the text poke address.
			 */
			if (e->branch != INTEL_PT_BR_NO_BRANCH)
				return 0;
		} else {
			intel_pt_cache_invalidate(al.map->dso, machine, offset);
			intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
				     al.map->dso->long_name, addr);
		}
	}

	return 0;
}

static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (pt->sampling_mode) {
			if (sample->aux_sample.size)
				err = intel_pt_process_timeless_sample(pt,
								       sample);
		} else if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (pt->synth_opts.add_callchain && !sample->callchain)
			intel_pt_add_callchain(pt, sample);
		if (pt->synth_opts.add_last_branch && !sample->branch_stack)
			intel_pt_add_br_stack(pt, sample);
	}

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
		err = intel_pt_text_poke(pt, event);

	intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
		     event->header.type, sample->cpu, sample->time, timestamp);
	intel_pt_log_event(event);

	return err;
}

static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}

static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}

static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	thread__put(pt->unknown_thread);
	addr_filters__exit(&pt->filts);
	zfree(&pt->chain);
	zfree(&pt->filter);
	zfree(&pt->time_ranges);
	free(pt);
}

static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
				       struct evsel *evsel)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	return evsel->core.attr.type == pt->pmu_type;
}

static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		int err;

		if (perf_data__is_pipe(session->data)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

static int intel_pt_queue_data(struct perf_session *session,
			       struct perf_sample *sample,
			       union perf_event *event, u64 data_offset)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;

	if (event) {
		return auxtrace_queues__add_event(&pt->queues, session, event,
						  data_offset, NULL);
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	return auxtrace_queues__add_sample(&pt->queues, session, sample,
					   data_offset, timestamp);
}

struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}

static int intel_pt_synth_event(struct perf_session *session, const char *name,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;
	int err;

	pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
		 name, id, (u64)attr->sample_type);

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					  &id, intel_pt_event_synth);
	if (err)
		pr_err("%s: failed to synthesize '%s' event type\n",
		       __func__, name);

	return err;
}

static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
				    const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.id && evsel->core.id[0] == id) {
			if (evsel->name)
				zfree(&evsel->name);
			evsel->name = strdup(name);
			break;
		}
	}
}

static struct evsel *intel_pt_evsel(struct intel_pt *pt,
				    struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
			return evsel;
	}

	return NULL;
}

static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel = intel_pt_evsel(pt, evlist);
	struct perf_event_attr attr;
	u64 id;
	int err;

	if (!evsel) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

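	/*
	 * Base synthesized event ids on the Intel PT evsel's first sample id
	 * plus a large offset, presumably to avoid clashing with ids already
	 * allocated for the session's real events.
	 */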
	id = evsel->core.id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = intel_pt_synth_event(session, "branches", &attr, id);
		if (err)
			return err;
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (pt->synth_opts.callchain)
		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
	if (pt->synth_opts.last_branch)
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		err = intel_pt_synth_event(session, "instructions", &attr, id);
		if (err)
			return err;
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
	attr.sample_period = 1;

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		err = intel_pt_synth_event(session, "transactions", &attr, id);
		if (err)
			return err;
		pt->sample_transactions = true;
		pt->transactions_sample_type = attr.sample_type;
		pt->transactions_id = id;
		intel_pt_set_event_name(evlist, id, "transactions");
		id += 1;
	}

	attr.type = PERF_TYPE_SYNTH;
	attr.sample_type |= PERF_SAMPLE_RAW;

	if (pt->synth_opts.ptwrites) {
		attr.config = PERF_SYNTH_INTEL_PTWRITE;
		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
		if (err)
			return err;
		pt->sample_ptwrites = true;
		pt->ptwrites_sample_type = attr.sample_type;
		pt->ptwrites_id = id;
		intel_pt_set_event_name(evlist, id, "ptwrite");
		id += 1;
	}

	if (pt->synth_opts.pwr_events) {
		pt->sample_pwr_events = true;
		pt->pwr_events_sample_type = attr.sample_type;

		attr.config = PERF_SYNTH_INTEL_CBR;
		err = intel_pt_synth_event(session, "cbr", &attr, id);
		if (err)
			return err;
		pt->cbr_id = id;
		intel_pt_set_event_name(evlist, id, "cbr");
		id += 1;
	}

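	/*
	 * Bit 4 (0x10) of the recorded event config is taken here to be the
	 * PwrEvtEn bit of IA32_RTIT_CTL, i.e. mwait/pwre/exstop/pwrx events
	 * are only synthesized if power event tracing was enabled.
	 */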
	if (pt->synth_opts.pwr_events && (evsel->core.attr.config & 0x10)) {
		attr.config = PERF_SYNTH_INTEL_MWAIT;
		err = intel_pt_synth_event(session, "mwait", &attr, id);
		if (err)
			return err;
		pt->mwait_id = id;
		intel_pt_set_event_name(evlist, id, "mwait");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRE;
		err = intel_pt_synth_event(session, "pwre", &attr, id);
		if (err)
			return err;
		pt->pwre_id = id;
		intel_pt_set_event_name(evlist, id, "pwre");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_EXSTOP;
		err = intel_pt_synth_event(session, "exstop", &attr, id);
		if (err)
			return err;
		pt->exstop_id = id;
		intel_pt_set_event_name(evlist, id, "exstop");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRX;
		err = intel_pt_synth_event(session, "pwrx", &attr, id);
		if (err)
			return err;
		pt->pwrx_id = id;
		intel_pt_set_event_name(evlist, id, "pwrx");
		id += 1;
	}

	return 0;
}

static void intel_pt_setup_pebs_events(struct intel_pt *pt)
{
	struct evsel *evsel;

	if (!pt->synth_opts.other_events)
		return;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (evsel->core.attr.aux_output && evsel->core.id) {
			pt->sample_pebs = true;
			pt->pebs_evsel = evsel;
			return;
		}
	}
}

static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel) {
		const char *name = evsel__name(evsel);

		if (!strcmp(name, "sched:sched_switch"))
			return evsel;
	}

	return NULL;
}

static bool intel_pt_find_switch(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.context_switch)
			return true;
	}

	return false;
}

static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
	struct intel_pt *pt = data;

	if (!strcmp(var, "intel-pt.mispred-all"))
		pt->mispred_all = perf_config_bool(var, value);

	return 0;
}
Adrian Hunter2c47db92019-06-04 16:00:09 +03003142/* Find least TSC which converts to ns or later */
3143static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
3144{
3145 u64 tsc, tm;
3146
3147 tsc = perf_time_to_tsc(ns, &pt->tc);
3148
3149 while (1) {
3150 tm = tsc_to_perf_time(tsc, &pt->tc);
3151 if (tm < ns)
3152 break;
3153 tsc -= 1;
3154 }
3155
3156 while (tm < ns)
3157 tm = tsc_to_perf_time(++tsc, &pt->tc);
3158
3159 return tsc;
3160}
3161
3162/* Find greatest TSC which converts to ns or earlier */
3163static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
3164{
3165 u64 tsc, tm;
3166
3167 tsc = perf_time_to_tsc(ns, &pt->tc);
3168
3169 while (1) {
3170 tm = tsc_to_perf_time(tsc, &pt->tc);
3171 if (tm > ns)
3172 break;
3173 tsc += 1;
3174 }
3175
3176 while (tm > ns)
3177 tm = tsc_to_perf_time(--tsc, &pt->tc);
3178
3179 return tsc;
3180}
3181
3182static int intel_pt_setup_time_ranges(struct intel_pt *pt,
3183 struct itrace_synth_opts *opts)
3184{
3185 struct perf_time_interval *p = opts->ptime_range;
3186 int n = opts->range_num;
3187 int i;
3188
3189 if (!n || !p || pt->timeless_decoding)
3190 return 0;
3191
3192 pt->time_ranges = calloc(n, sizeof(struct range));
3193 if (!pt->time_ranges)
3194 return -ENOMEM;
3195
3196 pt->range_cnt = n;
3197
3198 intel_pt_log("%s: %u range(s)\n", __func__, n);
3199
3200 for (i = 0; i < n; i++) {
3201 struct range *r = &pt->time_ranges[i];
3202 u64 ts = p[i].start;
3203 u64 te = p[i].end;
3204
3205 /*
3206 * Take care to ensure the TSC range matches the perf-time range
3207 * when converted back to perf-time.
3208 */
3209 r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
3210 r->end = te ? intel_pt_tsc_end(te, pt) : 0;
3211
3212 intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
3213 i, ts, te);
3214 intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
3215 i, r->start, r->end);
3216 }
3217
3218 return 0;
3219}
3220
Adrian Hunter90e457f2015-07-17 19:33:41 +03003221static const char * const intel_pt_info_fmts[] = {
Adrian Hunter11fa7cb2015-07-17 19:33:54 +03003222 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
3223 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
3224 [INTEL_PT_TIME_MULT] = " Time Muliplier %"PRIu64"\n",
3225 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n",
3226 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
3227 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n",
3228 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n",
3229 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n",
3230 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
3231 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
3232 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
3233 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
3234 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
3235 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
Adrian Hunterfa8025c2016-09-23 17:38:42 +03003236 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n",
Adrian Hunter2b9e32c2016-09-23 17:38:46 +03003237 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n",
Adrian Hunter90e457f2015-07-17 19:33:41 +03003238};
3239
Jiri Olsa9a8dad02019-08-28 15:57:02 +02003240static void intel_pt_print_info(__u64 *arr, int start, int finish)
Adrian Hunter90e457f2015-07-17 19:33:41 +03003241{
3242 int i;
3243
3244 if (!dump_trace)
3245 return;
3246
3247 for (i = start; i <= finish; i++)
3248 fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
3249}
3250
Adrian Hunter2b9e32c2016-09-23 17:38:46 +03003251static void intel_pt_print_info_str(const char *name, const char *str)
3252{
3253 if (!dump_trace)
3254 return;
3255
3256 fprintf(stdout, " %-20s%s\n", name, str ? str : "");
3257}
3258
Jiri Olsa72932372019-08-28 15:57:16 +02003259static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
Adrian Hunter40b746a2016-09-23 17:38:44 +03003260{
3261 return auxtrace_info->header.size >=
Jiri Olsa72932372019-08-28 15:57:16 +02003262 sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
Adrian Hunter40b746a2016-09-23 17:38:44 +03003263}
3264
int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	void *info_end;
	__u64 *info;
	int err;

	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	addr_filters__init(&pt->filts);

	err = perf_config(intel_pt_perf_config, pt);
	if (err)
		goto err_free;

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
		pt->max_non_turbo_ratio =
			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_MAX_NONTURBO_RATIO,
				    INTEL_PT_MAX_NONTURBO_RATIO);
	}

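	/*
	 * The address filter string, if present, follows the fixed priv
	 * fields: priv[INTEL_PT_FILTER_STR_LEN] holds its length and the
	 * string itself is stored immediately after, padded to a multiple
	 * of 8 bytes.
	 */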
	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
	info_end = (void *)info + auxtrace_info->header.size;

	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
		size_t len;

		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	if (pt->timeless_decoding && !pt->tc.time_mult)
		pt->tc.time_mult = 1;
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = intel_pt_sampling_mode(pt);
	pt->est_tsc = !pt->timeless_decoding;

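	/*
	 * Allocate a placeholder thread (the pid/tid values are arbitrary
	 * sentinels) to attribute samples to when the real thread cannot
	 * be found.
	 */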
	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree nor in a
	 * list, initialize its list node so that at thread__put() the
	 * current thread lifetime assumption is kept and we don't segfault
	 * at list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_maps(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.queue_data = intel_pt_queue_data;
	pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

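	/*
	 * have_sched_switch == 1 means context switches come from the
	 * sched_switch tracepoint; == 2 means they are recorded via an
	 * evsel with the context_switch attribute flag set instead.
	 */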
	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}

	if (session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&pt->synth_opts,
				session->itrace_synth_opts->default_no_sample);
		if (!session->itrace_synth_opts->default_no_sample &&
		    !session->itrace_synth_opts->inject) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
			pt->synth_opts.add_callchain = true;
		}
		pt->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

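		/*
		 * Round to the nearest multiple of 100 MHz: e.g. a TSC
		 * frequency of 3,000,000,000 Hz gives a ratio of
		 * (3000000000 + 50000000) / 100000000 = 30.
		 */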
		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
	}

	err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
	if (err)
		goto err_delete_thread;

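	/*
	 * With the --itrace 'calls' or 'returns' options, restrict
	 * synthesized branches to those beginning or ending a function,
	 * plus the corresponding trace begin/end branches.
	 */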
	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
	    !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
			pt->synth_opts.add_callchain = false;
		}
	}

	if (pt->synth_opts.add_callchain) {
		err = intel_pt_callchain_init(pt);
		if (err)
			goto err_delete_thread;
	}

	if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
		pt->br_stack_sz = pt->synth_opts.last_branch_sz;
		pt->br_stack_sz_plus = pt->br_stack_sz;
	}

	if (pt->synth_opts.add_last_branch) {
		err = intel_pt_br_stack_init(pt);
		if (err)
			goto err_delete_thread;
		/*
		 * Additional branch stack size to cater for tracing from the
		 * actual sample ip to where the sample time is recorded.
		 * Measured at about 200 branches, but generously set to 1024.
		 * If kernel space is not being traced, then add just 1 for the
		 * branch to kernel space.
		 */
		if (intel_pt_tracing_kernel(pt))
			pt->br_stack_sz_plus += 1024;
		else
			pt->br_stack_sz_plus += 1;
	}

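	/*
	 * The thread stack serves the call/return tracking cases
	 * ('callstack' below) and, additionally, the last-branch cases,
	 * where it keeps a per-thread branch stack.
	 */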
	pt->use_thread_stack = pt->synth_opts.callchain ||
			       pt->synth_opts.add_callchain ||
			       pt->synth_opts.thread_stack ||
			       pt->synth_opts.last_branch ||
			       pt->synth_opts.add_last_branch;

	pt->callstack = pt->synth_opts.callchain ||
			pt->synth_opts.add_callchain ||
			pt->synth_opts.thread_stack;

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	intel_pt_setup_pebs_events(pt);

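	/*
	 * In sampling mode, or if the file has no AUXTRACE index, queue the
	 * trace data by walking the events; otherwise queue it directly
	 * from the index.
	 */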
	if (pt->sampling_mode || list_empty(&session->auxtrace_index))
		err = auxtrace_queue_data(session, true, true);
	else
		err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	zfree(&pt->chain);
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	zfree(&pt->time_ranges);
	free(pt);
	return err;
}