blob: 2eaae140def26b5ef952d814e47a1a3179d8abbb [file] [log] [blame]
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001/*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9
David Ahern936be502011-09-06 09:12:26 -060010#include <byteswap.h>
11#include "asm/bug.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020012#include "evsel.h"
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -020013#include "evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020014#include "util.h"
Arnaldo Carvalho de Melo86bd5e82011-01-03 23:09:46 -020015#include "cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020016#include "thread_map.h"
Namhyung Kim12864b32012-04-26 14:15:22 +090017#include "target.h"
Jiri Olsa287e74a2012-06-28 23:18:49 +020018#include "../../../include/linux/hw_breakpoint.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020019
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -020020#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -020021#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -020022
Arnaldo Carvalho de Melobde09462012-08-01 18:53:11 -030023static int __perf_evsel__sample_size(u64 sample_type)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -030024{
25 u64 mask = sample_type & PERF_SAMPLE_MASK;
26 int size = 0;
27 int i;
28
29 for (i = 0; i < 64; i++) {
30 if (mask & (1ULL << i))
31 size++;
32 }
33
34 size *= sizeof(u64);
35
36 return size;
37}
38
/*
 * Zero a struct hists and set up its empty rbtree roots and mutex.
 * entries_in starts out pointing at slot 0 of entries_in_array.
 */
void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}
48
/*
 * Initialize an already-allocated evsel: record its index, copy the
 * attr, init its list node and hists, and cache the per-sample size
 * implied by attr->sample_type.
 */
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->attr	   = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}
58
/*
 * Allocate and initialize a new evsel. Returns NULL on allocation
 * failure. The caller owns the result and releases it with
 * perf_evsel__delete().
 */
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
68
/* Human readable names for PERF_TYPE_HARDWARE events, indexed by config. */
static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};
81
Arnaldo Carvalho de Melodd4f5222012-06-13 15:52:42 -030082static const char *__perf_evsel__hw_name(u64 config)
Arnaldo Carvalho de Meloc4104312012-05-25 16:38:11 -030083{
84 if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
85 return perf_evsel__hw_names[config];
86
87 return "unknown-hardware";
88}
89
/*
 * Append the ":ukhGHppp"-style modifier suffix implied by evsel->attr
 * to bf, returning the number of characters written. A slot for the
 * leading ':' is reserved lazily: the first modifier printed bumps r
 * and remembers the (1-based) position in 'colon', which is patched to
 * ':' once we know at least one modifier was emitted.
 */
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	/* Only print u/k/h when at least one level is being excluded. */
	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	/* precise_ip is 0-3; print that many 'p' characters. */
	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	/* Patch the reserved slot into a ':' separator. */
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
125
/* Format "name[:modifiers]" for a PERF_TYPE_HARDWARE evsel into bf. */
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
131
/* Human readable names for PERF_TYPE_SOFTWARE events, indexed by config. */
static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};
143
Arnaldo Carvalho de Melodd4f5222012-06-13 15:52:42 -0300144static const char *__perf_evsel__sw_name(u64 config)
Arnaldo Carvalho de Melo335c2f52012-06-11 14:36:20 -0300145{
146 if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
147 return perf_evsel__sw_names[config];
148 return "unknown-software";
149}
150
/* Format "name[:modifiers]" for a PERF_TYPE_SOFTWARE evsel into bf. */
static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
156
Jiri Olsa287e74a2012-06-28 23:18:49 +0200157static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
158{
159 int r;
160
161 r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
162
163 if (type & HW_BREAKPOINT_R)
164 r += scnprintf(bf + r, size - r, "r");
165
166 if (type & HW_BREAKPOINT_W)
167 r += scnprintf(bf + r, size - r, "w");
168
169 if (type & HW_BREAKPOINT_X)
170 r += scnprintf(bf + r, size - r, "x");
171
172 return r;
173}
174
/* Format "mem:0x<addr>:<rwx>[:modifiers]" for a breakpoint evsel. */
static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
181
/*
 * Alias tables for PERF_TYPE_HW_CACHE event names. Index [x][0] is the
 * canonical spelling used when formatting; the rest are accepted
 * aliases (used by the event parser elsewhere).
 */
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",	"bpc",		},
 { "node",								},
};

/* Cache operation aliases; [op][1] is the plural used for "refs" names. */
const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

/* Cache result aliases (access vs. miss). */
const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};
205
206#define C(x) PERF_COUNT_HW_CACHE_##x
207#define CACHE_READ (1 << C(OP_READ))
208#define CACHE_WRITE (1 << C(OP_WRITE))
209#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
210#define COP(x) (1 << x)
211
212/*
213 * cache operartion stat
214 * L1I : Read and prefetch only
215 * ITLB and BPU : Read-only
216 */
217static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
218 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
219 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
220 [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
221 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
222 [C(ITLB)] = (CACHE_READ),
223 [C(BPU)] = (CACHE_READ),
224 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
225};
226
227bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
228{
229 if (perf_evsel__hw_cache_stat[type] & COP(op))
230 return true; /* valid */
231 else
232 return false; /* invalid */
233}
234
/*
 * Format a cache event name, e.g. "L1-dcache-load-misses". When the
 * result is 0 (refs) the result component is omitted and the plural op
 * alias (e.g. "loads") is used instead: "L1-dcache-loads".
 */
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
247
Arnaldo Carvalho de Melodd4f5222012-06-13 15:52:42 -0300248static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
Arnaldo Carvalho de Melo0b668bc2012-06-11 14:08:07 -0300249{
250 u8 op, result, type = (config >> 0) & 0xff;
251 const char *err = "unknown-ext-hardware-cache-type";
252
253 if (type > PERF_COUNT_HW_CACHE_MAX)
254 goto out_err;
255
256 op = (config >> 8) & 0xff;
257 err = "unknown-ext-hardware-cache-op";
258 if (op > PERF_COUNT_HW_CACHE_OP_MAX)
259 goto out_err;
260
261 result = (config >> 16) & 0xff;
262 err = "unknown-ext-hardware-cache-result";
263 if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
264 goto out_err;
265
266 err = "invalid-cache";
267 if (!perf_evsel__is_cache_op_valid(type, op))
268 goto out_err;
269
270 return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
271out_err:
272 return scnprintf(bf, size, "%s", err);
273}
274
/* Format "cache-name[:modifiers]" for a PERF_TYPE_HW_CACHE evsel. */
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
280
/* Format "raw 0x<config>[:modifiers]" for a PERF_TYPE_RAW evsel. */
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
286
/*
 * Return a human readable name for the evsel, formatting it from the
 * attr type/config on first use and caching it in evsel->name. Never
 * returns NULL: if strdup() fails, "unknown" is returned (and the name
 * will be rebuilt on the next call).
 */
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	/* Already named (by the user or a previous call)? */
	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		/* Tracepoint names come from elsewhere; no formatter here yet. */
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "%s", "unknown attr type");
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
328
/*
 * Fill in evsel->attr from the record/top session options. Only the
 * first counter in the list (idx == 0) requests the "track" meta events
 * (mmap/comm) so they are not duplicated per counter. 'first' is the
 * group leader candidate used for the enable_on_exec decision.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->disabled = 1;
	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING |
			      PERF_FORMAT_ID;

	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a 1 default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				    opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			/* Frequency mode: kernel adjusts the period dynamically. */
			attr->sample_type	|= PERF_SAMPLE_PERIOD;
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type	|= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph)
		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;

	/* System-wide/per-cpu sessions need to know which cpu a sample hit. */
	if (perf_target__has_cpu(&opts->target))
		attr->sample_type	|= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type	|= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type	|= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type	|= PERF_SAMPLE_TIME;
		attr->sample_type	|= PERF_SAMPLE_RAW;
		attr->sample_type	|= PERF_SAMPLE_CPU;
	}

	/* Low-latency mode: wake the consumer on every single event. */
	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		attr->sample_type	|= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	/*
	 * When forking the workload ourselves, keep the counter disabled
	 * until exec so the setup code is not measured; only the group
	 * leader (or every event when not grouping) gets this.
	 */
	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
	}
}
407
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200408int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
409{
David Ahern4af4c952011-05-27 09:58:34 -0600410 int cpu, thread;
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200411 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
David Ahern4af4c952011-05-27 09:58:34 -0600412
413 if (evsel->fd) {
414 for (cpu = 0; cpu < ncpus; cpu++) {
415 for (thread = 0; thread < nthreads; thread++) {
416 FD(evsel, cpu, thread) = -1;
417 }
418 }
419 }
420
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200421 return evsel->fd != NULL ? 0 : -ENOMEM;
422}
423
/*
 * Allocate the per-(cpu,thread) sample_id xyarray plus the flat id[]
 * lookup table. On partial failure the first allocation is released
 * before returning -ENOMEM.
 */
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
439
/* Allocate zeroed counts with room for one perf_counts_values per cpu. */
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
446
/* Release the fd array; does not close any still-open fds. */
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
452
/* Release the sample_id xyarray and the flat id[] table. */
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}
460
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200461void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
462{
463 int cpu, thread;
464
465 for (cpu = 0; cpu < ncpus; cpu++)
466 for (thread = 0; thread < nthreads; ++thread) {
467 close(FD(evsel, cpu, thread));
468 FD(evsel, cpu, thread) = -1;
469 }
470}
471
/*
 * Release an evsel's internal resources. The evsel must already have
 * been unlinked from any evlist (hence the list_empty assert).
 */
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}
479
/* Tear down and free an evsel allocated with perf_evsel__new(). */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200487
/*
 * Read one counter value for (cpu, thread) into evsel->counts->cpu[cpu].
 * With scale, the enabled/running times are read too and the value is
 * scaled up (rounding to nearest) to compensate for multiplexing.
 */
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;	/* value [+ enabled + running] */

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	/* Lazily grow the counts array to cover this cpu. */
	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;	/* never scheduled: nothing counted */
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
514
/*
 * Read and sum the counter over every (cpu, thread) into
 * evsel->counts->aggr. counts->scaled is set to 0 (no scaling needed),
 * 1 (value was scaled) or -1 (counter never ran).
 */
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;	/* value [+ enabled + running] */
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			/* Skip slots that were never opened. */
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			/* Counter never got on the PMU at all. */
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			/* Multiplexed: extrapolate to full enabled time. */
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -0200558
/*
 * Open one perf event fd per (cpu, thread). When grouping, the first fd
 * opened on each cpu (or the one from group_fds) becomes the group
 * leader for that cpu. For cgroup events the cgroup fd is passed as the
 * pid together with PERF_FLAG_PID_CGROUP. On failure every fd opened so
 * far is closed and -errno from the failing open is returned.
 */
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group,
			      struct xyarray *group_fds)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}

			/* First event on this cpu becomes the group leader. */
			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	/*
	 * Unwind: close the partially-opened row, then every fully-opened
	 * earlier cpu row, resetting each slot to -1.
	 */
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}
610
611void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
612{
613 if (evsel->fd == NULL)
614 return;
615
616 perf_evsel__close_fd(evsel, ncpus, nthreads);
617 perf_evsel__free_fd(evsel);
618 evsel->fd = NULL;
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -0200619}
620
/*
 * Dummy maps substituted when the caller passes NULL: a single entry of
 * -1 means "any cpu" / "this process" to sys_perf_event_open().
 */
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	 = 1,
	.cpus	 = { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
636
/*
 * Open the evsel on the given cpu/thread maps; NULL maps fall back to
 * the single-entry "-1" dummies (any cpu / current process).
 */
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group,
		     struct xyarray *group_fd)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
}
651
/* Open the evsel system-wide on each cpu (no specific thread). */
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group,
			     struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
				  group_fd);
}
659
/* Open the evsel per thread, following the thread on any cpu. */
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group,
				struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
				  group_fd);
}
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -0200667
/*
 * Parse the sample_id_all trailer of a non-sample record. These fields
 * are appended at the END of the record in the reverse of the
 * PERF_RECORD_SAMPLE layout, hence the walk backwards from the last
 * u64. 'swapped' handles cross-endian files: u32 pairs were swapped as
 * one u64, so undo that before swapping the individual u32s.
 */
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample,
				       bool swapped)
{
	const u64 *array = event->sample.array;
	union u64_swap u;

	/* Point at the last u64 of the record. */
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}
720
Frederic Weisbecker98e1da92011-05-21 20:08:15 +0200721static bool sample_overlap(const union perf_event *event,
722 const void *offset, u64 size)
723{
724 const void *base = event;
725
726 if (offset + size > base + event->header.size)
727 return true;
728
729 return false;
730}
731
/*
 * Decode a PERF_RECORD_SAMPLE (or the sample_id_all trailer of any
 * other record type) into *data, walking the u64 array in the exact
 * order the kernel writes the fields selected in attr.sample_type.
 * Variable-size payloads (callchain, raw, branch stack) are validated
 * against the record size before being pointed at in place. Returns 0
 * on success, -EFAULT when the record is truncated, -1 when an
 * unsupported field is present.
 */
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data, bool swapped)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		/* Non-sample records keep the id fields in a reversed trailer. */
		return perf_event__parse_id_sample(event, type, data, swapped);
	}

	array = event->sample.array;

	/* Reject records too small to hold the fixed-size fields. */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		/* Validate the nr header before trusting it to size the body. */
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		/* The raw area starts with a u32 size, then the payload. */
		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}
	return 0;
}
Andrew Vagin74eec262011-11-28 12:03:31 +0300874
/*
 * Inverse of perf_evsel__parse_sample: write the fields selected in
 * 'type' into event->sample.array in the kernel's forward order.
 * 'swapped' re-applies the byte swapping the parser undid, so the
 * synthesized record matches the file's endianness.
 */
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}