// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <byteswap.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <perf/evsel.h>
#include "asm/bug.h"
#include "callchain.h"
#include "cgroup.h"
#include "counts.h"
#include "event.h"
#include "evsel.h"
#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
#include "evlist.h"
#include <perf/cpumap.h>
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "record.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
#include "util.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include <internal/xyarray.h>
#include <internal/lib.h>

#include <linux/ctype.h>

struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
	return 0;
}

void __weak test_attr__ready(void) { }

static void perf_evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct evsel *evsel);
	void	(*fini)(struct evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct evsel *evsel),
			      void (*fini)(struct evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}
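
/*
 * Example of embedding: a tool that wants per-evsel private state can
 * register a larger object up front. The struct and callback below are
 * hypothetical, for illustration only:
 *
 *	struct my_evsel {
 *		struct evsel	evsel;
 *		int		my_state;
 *	};
 *
 *	static int my_evsel__init(struct evsel *evsel)
 *	{
 *		((struct my_evsel *)evsel)->my_state = 42;
 *		return 0;
 *	}
 *
 *	perf_evsel__object_config(sizeof(struct my_evsel),
 *				  my_evsel__init, NULL);
 *
 * From then on perf_evsel__new_idx() allocates sizeof(struct my_evsel)
 * bytes and evsel__init() invokes my_evsel__init() on each new evsel.
 */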

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
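
/*
 * Example: for sample_type == (PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_ID), three bits of PERF_SAMPLE_MASK are set, so the
 * fixed-size part of the sampled data is 3 * sizeof(u64) == 24 bytes.
 */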

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * perf_record_sample.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}
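
/*
 * Example: with sample_type == (PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_ID), the id comes after the ip and pid/tid words, so this
 * returns 2. With PERF_SAMPLE_IDENTIFIER the id is always the first
 * word, i.e. 0; without any id bit set it returns -1.
 */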

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
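
/*
 * Example: with sample_type == (PERF_SAMPLE_ID | PERF_SAMPLE_CPU), the
 * id is the second word counting back from the end of the appended
 * struct sample_id (the cpu word comes last), so this returns 2. With
 * PERF_SAMPLE_IDENTIFIER the id is always the last word, i.e. 1.
 */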

void perf_evsel__calc_id_pos(struct evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
}

void __perf_evsel__set_sample_bit(struct evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->core.attr.sample_type & bit)) {
		evsel->core.attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->core.attr.sample_type & bit) {
		evsel->core.attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->core.attr.read_format |= PERF_FORMAT_ID;
}

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel: evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool perf_evsel__is_function_event(struct evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

void evsel__init(struct evsel *evsel,
		 struct perf_event_attr *attr, int idx)
{
	perf_evsel__init(&evsel->core, attr);
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	evsel->max_events  = ULONG_MAX;
	evsel->evlist	   = NULL;
	evsel->bpf_obj	   = NULL;
	evsel->bpf_fd	   = -1;
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_expr   = NULL;
	evsel->metric_name   = NULL;
	evsel->metric_events = NULL;
	evsel->collect_stat  = false;
	evsel->pmu_name      = NULL;
}

struct evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);

	if (!evsel)
		return NULL;
	evsel__init(evsel, attr, idx);

	if (perf_evsel__is_bpf_output(evsel)) {
		evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
						 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		evsel->core.attr.sample_period = 1;
	}

	if (perf_evsel__is_clock(evsel)) {
		/*
		 * evsel->unit points to the static alias->unit, so it's
		 * ok to use a static string here.
		 */
		static const char *unit = "msec";

		evsel->unit = unit;
		evsel->scale = 1e-6;
	}

	return evsel;
}

static bool perf_event_can_profile_kernel(void)
{
	return perf_event_paranoid_check(1);
}

struct evsel *perf_evsel__new_cycles(bool precise)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.exclude_kernel	= !perf_event_can_profile_kernel(),
	};
	struct evsel *evsel;

	event_attr_init(&attr);

	if (!precise)
		goto new_event;

	/*
	 * Now let the usual logic that sets up the perf_event_attr defaults
	 * kick in when we return and before perf_evsel__open() is called.
	 */
new_event:
	evsel = evsel__new(&attr);
	if (evsel == NULL)
		goto out;

	evsel->precise_max = true;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%s%s%.*s",
		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
		     attr.exclude_kernel ? "u" : "",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
		goto error_free;
out:
	return evsel;
error_free:
	evsel__delete(evsel);
	evsel = NULL;
	goto out;
}

/*
 * Returns a pointer with the error encoded via the <linux/err.h> interface.
 */
struct evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}
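
/*
 * Typical use (the sys/name pair below is just an example): the return
 * value must be checked with IS_ERR(), not compared against NULL:
 *
 *	struct evsel *evsel = perf_evsel__newtp_idx("sched", "sched_switch", 0);
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 */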

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->core.attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
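
/*
 * Example: with only attr.exclude_user and attr.exclude_hv set, the
 * first block prints just 'k', colon remembers where the separator
 * belongs, and the final bf[colon - 1] = ':' turns the buffer into
 * ":k", so perf_evsel__hw_name() below would yield e.g. "cycles:k".
 */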

static int perf_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->core.attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->core.attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data", },
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction", },
 { "LLC",	"L2", },
 { "dTLB",	"d-tlb",	"Data-TLB", },
 { "iTLB",	"i-tlb",	"Instruction-TLB", },
 { "branch",	"branches",	"bpu",		"btb",		"bpc", },
 { "node", },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read", },
 { "store",	"stores",	"write", },
 { "prefetch",	"prefetches",	"speculative-read",	"speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access", },
 { "misses",	"miss", },
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stats
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]		= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)]	= (CACHE_READ),
	[C(BPU)]	= (CACHE_READ),
	[C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}
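
/*
 * Example: perf_evsel__is_cache_op_valid(C(ITLB), C(OP_WRITE)) is false,
 * since the table above marks the ITLB as read-only, while
 * perf_evsel__is_cache_op_valid(C(L1D), C(OP_WRITE)) is true.
 */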

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
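
/*
 * Example: type == C(L1D), op == C(OP_READ), result == C(RESULT_MISS)
 * formats as "L1-dcache-load-misses"; with result == C(RESULT_ACCESS)
 * (i.e. 0) the second alias of the op is used instead: "L1-dcache-loads".
 */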

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
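
/*
 * The config layout decoded above is byte 0 = cache type, byte 1 = op,
 * byte 2 = result. Example: config == 0x10000 decodes to (L1D, OP_READ,
 * RESULT_MISS) and is named "L1-dcache-load-misses".
 */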

static int perf_evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->core.attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__tool_name(char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "duration_time");
	return ret;
}

const char *perf_evsel__name(struct evsel *evsel)
{
	char bf[128];

	if (!evsel)
		goto out_unknown;

	if (evsel->name)
		return evsel->name;

	switch (evsel->core.attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		if (evsel->tool_event)
			perf_evsel__tool_name(bf, sizeof(bf));
		else
			perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->core.attr.type);
		break;
	}

	evsel->name = strdup(bf);

	if (evsel->name)
		return evsel->name;
out_unknown:
	return "unknown";
}

const char *perf_evsel__group_name(struct evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

/*
 * Returns the group details for the specified leader,
 * with the following rules:
 *
 *  For record -e '{cycles,instructions}'
 *    'anon group { cycles:u, instructions:u }'
 *
 *  For record -e 'cycles,instructions' and report --group
 *    'cycles:u, instructions:u'
 */
int perf_evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
{
	int ret = 0;
	struct evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	if (!evsel->forced_leader)
		ret = scnprintf(buf, size, "%s { ", group_name);

	ret += scnprintf(buf + ret, size - ret, "%s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	if (!evsel->forced_leader)
		ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

static void __perf_evsel__config_callchain(struct evsel *evsel,
					   struct record_opts *opts,
					   struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->core.attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (opts->kernel_callchains)
		attr->exclude_callchain_user = 1;
	if (opts->user_callchains)
		attr->exclude_callchain_kernel = 1;
	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS |
							PERF_SAMPLE_BRANCH_HW_INDEX;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
				attr->sample_regs_user |= DWARF_MINIMAL_REGS;
				pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
					   "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
					   "so the minimal registers set (IP, SP) is explicitly forced.\n");
			} else {
				attr->sample_regs_user |= PERF_REGS_MASK;
			}
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

void perf_evsel__config_callchain(struct evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *param)
{
	if (param->enabled)
		return __perf_evsel__config_callchain(evsel, opts, param);
}

static void
perf_evsel__reset_callgraph(struct evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->core.attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK |
					      PERF_SAMPLE_BRANCH_HW_INDEX);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}

static void apply_config_terms(struct evsel *evsel,
			       struct record_opts *opts, bool track)
{
	struct perf_evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->core.attr;
	/* callgraph default */
	struct callchain_param param = {
		.record_mode = callchain_param.record_mode,
	};
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case PERF_EVSEL__CONFIG_TERM_PERIOD:
			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
				attr->sample_period = term->val.period;
				attr->freq = 0;
				perf_evsel__reset_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_FREQ:
			if (!(term->weak && opts->user_freq != UINT_MAX)) {
				attr->sample_freq = term->val.freq;
				attr->freq = 1;
				perf_evsel__set_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				perf_evsel__set_sample_bit(evsel, TIME);
			else
				perf_evsel__reset_sample_bit(evsel, TIME);
			break;
		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.str;
			break;
		case PERF_EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.str && strcmp(term->val.str, "no")) {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.str,
						 &attr->branch_sample_type);
			} else
				perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_EVENTS:
			evsel->max_events = term->val.max_events;
			break;
		case PERF_EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * perf_evsel__config. If the user explicitly set
			 * inherit using config terms, override the global
			 * opt->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
			break;
		case PERF_EVSEL__CONFIG_TERM_PERCORE:
			break;
		case PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT:
			attr->aux_output = term->val.aux_output ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
			/* Already applied by auxtrace */
			break;
		case PERF_EVSEL__CONFIG_TERM_CFG_CHG:
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		bool sample_address = false;

		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
				if (param.record_mode == CALLCHAIN_DWARF)
					sample_address = true;
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			perf_evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled) {
			if (sample_address) {
				perf_evsel__set_sample_bit(evsel, ADDR);
				perf_evsel__set_sample_bit(evsel, DATA_SRC);
				evsel->core.attr.mmap_data = track;
			}
			perf_evsel__config_callchain(evsel, opts, &param);
		}
	}
}
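
/*
 * Example: for an event specified as 'cycles/period=100000/' the PERIOD
 * term above overrides the global sampling setup, i.e. attr.freq is
 * cleared and attr.sample_period is forced to 100000, unless the term
 * is weak and the user gave an explicit -c/--count interval on the
 * command line.
 */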

static bool is_dummy_event(struct evsel *evsel)
{
	return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
	       (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}

struct perf_evsel_config_term *__perf_evsel__get_config_term(struct evsel *evsel,
							     enum evsel_term_type type)
{
	struct perf_evsel_config_term *term, *found_term = NULL;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type == type)
			found_term = term;
	}

	return found_term;
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to an already running traced program
 *     we enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
			struct callchain_param *callchain)
{
	struct evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->core.attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->write_backward = opts->overwrite ? 1 : 0;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need the ID even in case of a single event, because
		 * PERF_SAMPLE_READ processing is ID specific.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply the group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->core.nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than the leader in case the leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->freq           = 0;
		attr->sample_freq    = 0;
		attr->sample_period  = 0;
		attr->write_backward = 0;

		/*
		 * We don't get samples for slave events, we make them
		 * when delivering the group leader sample. Set the slave
		 * event to follow the master sample_type to ease up
		 * reporting.
		 */
		attr->sample_type = leader->core.attr.sample_type;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING |
			PERF_FORMAT_ID;
		attr->inherit_stat = 1;
	}

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for the function trace
	 * event, due to issues with page faults while tracing the page
	 * fault handler and its overall trickiness.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->core.attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		perf_evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (opts->sample_user_regs) {
		attr->sample_regs_user |= opts->sample_user_regs;
		perf_evsel__set_sample_bit(evsel, REGS_USER);
	}

	if (target__has_cpu(&opts->target) || opts->sample_cpu)
		perf_evsel__set_sample_bit(evsel, CPU);

	/*
	 * When the user explicitly disabled time, don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->sample_phys_addr)
		perf_evsel__set_sample_bit(evsel, PHYS_ADDR);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task      = track;
	attr->mmap      = track;
	attr->mmap2     = track && !perf_missing_features.mmap2;
	attr->comm      = track;
	attr->ksymbol   = track && !perf_missing_features.ksymbol;
	attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf;

	if (opts->record_namespaces)
		attr->namespaces = track;

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		attr->precise_ip = 3;

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	if (evsel->core.own_cpus || evsel->unit)
		evsel->core.attr.read_format |= PERF_FORMAT_ID;

	/*
	 * Apply event-specific term settings; these override
	 * any global configuration.
	 */
	apply_config_terms(evsel, opts, track);

	evsel->ignore_missing_thread = opts->ignore_missing_thread;

	/* The --period option takes precedence. */
	if (opts->period_set) {
		if (opts->period)
			perf_evsel__set_sample_bit(evsel, PERIOD);
		else
			perf_evsel__reset_sample_bit(evsel, PERIOD);
	}

	/*
	 * For initial_delay, a dummy event is added implicitly.
	 * The software event will trigger a -EOPNOTSUPP error out
	 * if the BRANCH_STACK bit is set.
	 */
	if (opts->initial_delay && is_dummy_event(evsel))
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
}
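
/*
 * Example of the leader sampling setup above: with
 * 'perf record -e "{cycles,instructions}:S"' (the :S modifier sets
 * evsel->sample_read) only the cycles leader keeps a sample
 * period/freq; the instructions member gets them zeroed and its value
 * is read back via PERF_FORMAT_GROUP when the leader's sample is
 * delivered. The command line here is illustrative only.
 */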
1187
Jiri Olsa32dcd022019-07-21 13:23:51 +02001188int perf_evsel__set_filter(struct evsel *evsel, const char *filter)
Arnaldo Carvalho de Melo12467ae2015-07-03 17:05:50 -03001189{
1190 char *new_filter = strdup(filter);
1191
1192 if (new_filter != NULL) {
1193 free(evsel->filter);
1194 evsel->filter = new_filter;
1195 return 0;
1196 }
1197
1198 return -1;
1199}
1200
Jiri Olsa32dcd022019-07-21 13:23:51 +02001201static int perf_evsel__append_filter(struct evsel *evsel,
Mathieu Poirier3541c032016-09-16 08:44:04 -06001202 const char *fmt, const char *filter)
Arnaldo Carvalho de Melo64ec84f2015-07-04 12:19:13 -03001203{
1204 char *new_filter;
1205
1206 if (evsel->filter == NULL)
1207 return perf_evsel__set_filter(evsel, filter);
1208
Mathieu Poirierb15d0a42016-09-16 08:44:03 -06001209 if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
Arnaldo Carvalho de Melo64ec84f2015-07-04 12:19:13 -03001210 free(evsel->filter);
1211 evsel->filter = new_filter;
1212 return 0;
1213 }
1214
1215 return -1;
1216}
1217
Jiri Olsa32dcd022019-07-21 13:23:51 +02001218int perf_evsel__append_tp_filter(struct evsel *evsel, const char *filter)
Mathieu Poirier3541c032016-09-16 08:44:04 -06001219{
1220 return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);
1221}
1222
Jiri Olsa32dcd022019-07-21 13:23:51 +02001223int perf_evsel__append_addr_filter(struct evsel *evsel, const char *filter)
Mathieu Poirier1e857482016-09-16 08:44:05 -06001224{
1225 return perf_evsel__append_filter(evsel, "%s,%s", filter);
1226}
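/*
 * Illustrative example (hypothetical filter strings): appending
 * tracepoint filters composes them with "&&", so
 *
 *	perf_evsel__set_filter(evsel, "common_pid != 42");
 *	perf_evsel__append_tp_filter(evsel, "prev_state == 0");
 *
 * leaves evsel->filter as "(common_pid != 42) && (prev_state == 0)",
 * while address filters are simply joined with a comma.
 */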
1227
Andi Kleen363fb122019-11-20 16:15:21 -08001228/* Caller has to clear disabled after going through all CPUs. */
1229int evsel__enable_cpu(struct evsel *evsel, int cpu)
1230{
1231 return perf_evsel__enable_cpu(&evsel->core, cpu);
1232}
1233
Jiri Olsaec7f24e2019-07-21 13:24:02 +02001234int evsel__enable(struct evsel *evsel)
Andi Kleene2407be2013-08-02 17:41:10 -07001235{
Jiri Olsaa00571f2019-07-21 13:24:52 +02001236 int err = perf_evsel__enable(&evsel->core);
Arnaldo Carvalho de Melob7e84522018-10-20 09:04:41 -03001237
1238 if (!err)
1239 evsel->disabled = false;
Arnaldo Carvalho de Melob7e84522018-10-20 09:04:41 -03001240 return err;
Andi Kleene2407be2013-08-02 17:41:10 -07001241}
1242
Andi Kleen363fb122019-11-20 16:15:21 -08001243/* Caller has to set disabled after going through all CPUs. */
1244int evsel__disable_cpu(struct evsel *evsel, int cpu)
1245{
1246 return perf_evsel__disable_cpu(&evsel->core, cpu);
1247}
1248
Jiri Olsa9a10bb22019-07-21 13:24:03 +02001249int evsel__disable(struct evsel *evsel)
Jiri Olsae98a4cb2015-12-03 10:06:41 +01001250{
Jiri Olsaa00571f2019-07-21 13:24:52 +02001251 int err = perf_evsel__disable(&evsel->core);
Arnaldo Carvalho de Melob7e84522018-10-20 09:04:41 -03001252 /*
1253	 * We mark it disabled here so that tools that disable an event can
1254	 * ignore events after they disable it. I.e. the ring buffer may
1255	 * already have a few more events queued up before the kernel got
1256	 * the stop request.
1257 */
1258 if (!err)
1259 evsel->disabled = true;
1260
1261 return err;
Jiri Olsae98a4cb2015-12-03 10:06:41 +01001262}
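/*
 * Illustrative usage sketch: since evsel->disabled is only set once the
 * kernel has acknowledged the stop, a tool can drain late samples like:
 *
 *	evsel__disable(evsel);
 *	if (evsel->disabled)
 *		drop_pending_samples(evsel);	// hypothetical helper
 */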
1263
Jiri Olsa32dcd022019-07-21 13:23:51 +02001264static void perf_evsel__free_config_terms(struct evsel *evsel)
Jiri Olsa930a2e22015-07-29 05:42:10 -04001265{
1266 struct perf_evsel_config_term *term, *h;
1267
1268 list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
Arnaldo Carvalho de Meloe56fbc92019-07-04 12:13:46 -03001269 list_del_init(&term->list);
Leo Yan3220fb82020-01-17 13:52:51 +08001270 if (term->free_str)
1271 zfree(&term->val.str);
Jiri Olsa930a2e22015-07-29 05:42:10 -04001272 free(term);
1273 }
1274}
1275
Jiri Olsa32dcd022019-07-21 13:23:51 +02001276void perf_evsel__exit(struct evsel *evsel)
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001277{
Jiri Olsab27c4ec2019-07-21 13:24:22 +02001278 assert(list_empty(&evsel->core.node));
Arnaldo Carvalho de Melod49e4692015-08-27 08:07:40 -04001279 assert(evsel->evlist == NULL);
Arnaldo Carvalho de Melo42dfa452019-03-18 16:41:28 -03001280 perf_evsel__free_counts(evsel);
Jiri Olsa88761fa2019-07-21 13:24:50 +02001281 perf_evsel__free_fd(&evsel->core);
Jiri Olsa70c20362019-09-03 10:34:29 +02001282 perf_evsel__free_id(&evsel->core);
Jiri Olsa930a2e22015-07-29 05:42:10 -04001283 perf_evsel__free_config_terms(evsel);
Arnaldo Carvalho de Meloa53b6462018-03-06 10:10:45 -03001284 cgroup__put(evsel->cgrp);
Jiri Olsad400bd32019-07-21 13:24:37 +02001285 perf_cpu_map__put(evsel->core.cpus);
Jiri Olsafe1f61b2019-07-21 13:24:38 +02001286 perf_cpu_map__put(evsel->core.own_cpus);
Jiri Olsaaf663bd2019-07-21 13:24:39 +02001287 perf_thread_map__put(evsel->core.threads);
Arnaldo Carvalho de Melo597e48c2014-10-16 13:25:01 -03001288 zfree(&evsel->group_name);
Arnaldo Carvalho de Melo597e48c2014-10-16 13:25:01 -03001289 zfree(&evsel->name);
Arnaldo Carvalho de Meloce8ccff2014-10-09 15:29:51 -03001290 perf_evsel__object.fini(evsel);
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -02001291}
1292
Jiri Olsa5eb2dd22019-07-21 13:23:57 +02001293void evsel__delete(struct evsel *evsel)
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -02001294{
1295 perf_evsel__exit(evsel);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001296 free(evsel);
1297}
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -02001298
Jiri Olsa32dcd022019-07-21 13:23:51 +02001299void perf_evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
Jiri Olsa857a94a2014-11-21 10:31:05 +01001300 struct perf_counts_values *count)
Stephane Eranianc7a79c42013-01-29 12:47:43 +01001301{
1302 struct perf_counts_values tmp;
1303
1304 if (!evsel->prev_raw_counts)
1305 return;
1306
1307 if (cpu == -1) {
1308 tmp = evsel->prev_raw_counts->aggr;
1309 evsel->prev_raw_counts->aggr = *count;
1310 } else {
Jiri Olsaa6fa0032015-06-26 11:29:11 +02001311 tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
1312 *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
Stephane Eranianc7a79c42013-01-29 12:47:43 +01001313 }
1314
1315 count->val = count->val - tmp.val;
1316 count->ena = count->ena - tmp.ena;
1317 count->run = count->run - tmp.run;
1318}
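/*
 * Worked example (hypothetical numbers): in interval mode
 * prev_raw_counts holds the previous reading, so a previous val of 400
 * and a fresh raw val of 1000 yield a reported delta of 600, and the
 * fresh raw value is stashed for the next interval.
 */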
1319
Jiri Olsa13112bb2014-11-21 10:31:06 +01001320void perf_counts_values__scale(struct perf_counts_values *count,
1321 bool scale, s8 *pscaled)
1322{
1323 s8 scaled = 0;
1324
1325 if (scale) {
1326 if (count->run == 0) {
1327 scaled = -1;
1328 count->val = 0;
1329 } else if (count->run < count->ena) {
1330 scaled = 1;
Andi Kleen42a58642019-03-14 15:50:02 -07001331 count->val = (u64)((double) count->val * count->ena / count->run);
Jiri Olsa13112bb2014-11-21 10:31:06 +01001332 }
Andi Kleen75998bb2019-03-14 15:50:01 -07001333 }
Jiri Olsa13112bb2014-11-21 10:31:06 +01001334
1335 if (pscaled)
1336 *pscaled = scaled;
1337}
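/*
 * Worked example (hypothetical numbers): with val = 1000, ena = 200 and
 * run = 100 the event was scheduled in for half of the enabled time, so
 * the estimate becomes 1000 * 200 / 100 = 2000 and *pscaled is set to 1;
 * run == 0 zeroes val and sets *pscaled to -1.
 */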
1338
Jiri Olsaf7794d52017-07-26 14:02:05 +02001339static int
Jiri Olsa32dcd022019-07-21 13:23:51 +02001340perf_evsel__read_one(struct evsel *evsel, int cpu, int thread)
Jiri Olsaf7794d52017-07-26 14:02:05 +02001341{
1342 struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);
1343
Jiri Olsa5c30af92019-07-21 13:24:51 +02001344 return perf_evsel__read(&evsel->core, cpu, thread, count);
Jiri Olsaf7794d52017-07-26 14:02:05 +02001345}
1346
1347static void
Jiri Olsa32dcd022019-07-21 13:23:51 +02001348perf_evsel__set_count(struct evsel *counter, int cpu, int thread,
Jiri Olsaf7794d52017-07-26 14:02:05 +02001349 u64 val, u64 ena, u64 run)
1350{
1351 struct perf_counts_values *count;
1352
1353 count = perf_counts(counter->counts, cpu, thread);
1354
1355 count->val = val;
1356 count->ena = ena;
1357 count->run = run;
Jiri Olsadf1d6852019-07-21 13:23:48 +02001358
1359 perf_counts__set_loaded(counter->counts, cpu, thread, true);
Jiri Olsaf7794d52017-07-26 14:02:05 +02001360}
1361
1362static int
Jiri Olsa32dcd022019-07-21 13:23:51 +02001363perf_evsel__process_group_data(struct evsel *leader,
Jiri Olsaf7794d52017-07-26 14:02:05 +02001364 int cpu, int thread, u64 *data)
1365{
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001366 u64 read_format = leader->core.attr.read_format;
Jiri Olsaf7794d52017-07-26 14:02:05 +02001367 struct sample_read_value *v;
1368 u64 nr, ena = 0, run = 0, i;
1369
1370 nr = *data++;
1371
Jiri Olsa5643b1a2019-07-21 13:24:46 +02001372 if (nr != (u64) leader->core.nr_members)
Jiri Olsaf7794d52017-07-26 14:02:05 +02001373 return -EINVAL;
1374
1375 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1376 ena = *data++;
1377
1378 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1379 run = *data++;
1380
1381 v = (struct sample_read_value *) data;
1382
1383 perf_evsel__set_count(leader, cpu, thread,
1384 v[0].value, ena, run);
1385
1386 for (i = 1; i < nr; i++) {
Jiri Olsa32dcd022019-07-21 13:23:51 +02001387 struct evsel *counter;
Jiri Olsaf7794d52017-07-26 14:02:05 +02001388
1389 counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
1390 if (!counter)
1391 return -EINVAL;
1392
1393 perf_evsel__set_count(counter, cpu, thread,
1394 v[i].value, ena, run);
1395 }
1396
1397 return 0;
1398}
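/*
 * The buffer parsed above follows the PERF_FORMAT_GROUP read layout,
 * shown here with both TOTAL_TIME flags assumed set:
 *
 *	struct read_format {
 *		u64 nr;
 *		u64 time_enabled;
 *		u64 time_running;
 *		struct { u64 value; u64 id; } cnt[nr];
 *	};
 */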
1399
1400static int
Jiri Olsa32dcd022019-07-21 13:23:51 +02001401perf_evsel__read_group(struct evsel *leader, int cpu, int thread)
Jiri Olsaf7794d52017-07-26 14:02:05 +02001402{
Arnaldo Carvalho de Melo82806c32017-11-09 12:03:40 -03001403 struct perf_stat_evsel *ps = leader->stats;
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001404 u64 read_format = leader->core.attr.read_format;
Jiri Olsa5c30af92019-07-21 13:24:51 +02001405 int size = perf_evsel__read_size(&leader->core);
Jiri Olsaf7794d52017-07-26 14:02:05 +02001406 u64 *data = ps->group_data;
1407
1408 if (!(read_format & PERF_FORMAT_ID))
1409 return -EINVAL;
1410
1411 if (!perf_evsel__is_group_leader(leader))
1412 return -EINVAL;
1413
1414 if (!data) {
1415 data = zalloc(size);
1416 if (!data)
1417 return -ENOMEM;
1418
1419 ps->group_data = data;
1420 }
1421
1422 if (FD(leader, cpu, thread) < 0)
1423 return -EINVAL;
1424
1425 if (readn(FD(leader, cpu, thread), data, size) <= 0)
1426 return -errno;
1427
1428 return perf_evsel__process_group_data(leader, cpu, thread, data);
1429}
1430
Jiri Olsa32dcd022019-07-21 13:23:51 +02001431int perf_evsel__read_counter(struct evsel *evsel, int cpu, int thread)
Jiri Olsaf7794d52017-07-26 14:02:05 +02001432{
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001433 u64 read_format = evsel->core.attr.read_format;
Jiri Olsaf7794d52017-07-26 14:02:05 +02001434
1435 if (read_format & PERF_FORMAT_GROUP)
1436 return perf_evsel__read_group(evsel, cpu, thread);
1437 else
1438 return perf_evsel__read_one(evsel, cpu, thread);
1439}
1440
Jiri Olsa32dcd022019-07-21 13:23:51 +02001441int __perf_evsel__read_on_cpu(struct evsel *evsel,
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -02001442 int cpu, int thread, bool scale)
1443{
1444 struct perf_counts_values count;
1445 size_t nv = scale ? 3 : 1;
1446
1447 if (FD(evsel, cpu, thread) < 0)
1448 return -EINVAL;
1449
Jiri Olsaa6fa0032015-06-26 11:29:11 +02001450 if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
Arnaldo Carvalho de Melo4eed11d2011-01-04 00:13:17 -02001451 return -ENOMEM;
1452
Stephane Eraniandb49a712017-04-12 11:23:01 -07001453 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -02001454 return -errno;
1455
Jiri Olsaa6fa0032015-06-26 11:29:11 +02001456 perf_evsel__compute_deltas(evsel, cpu, thread, &count);
Jiri Olsa13112bb2014-11-21 10:31:06 +01001457 perf_counts_values__scale(&count, scale, NULL);
Jiri Olsaa6fa0032015-06-26 11:29:11 +02001458 *perf_counts(evsel->counts, cpu, thread) = count;
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -02001459 return 0;
1460}
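/*
 * Illustrative example: reading a scaled count for CPU 0, thread 0:
 *
 *	if (!__perf_evsel__read_on_cpu(evsel, 0, 0, true))
 *		val = perf_counts(evsel->counts, 0, 0)->val;
 *
 * With scale == true, nv == 3, so the event must have been opened with
 * PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING.
 */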
1461
Jiri Olsa32dcd022019-07-21 13:23:51 +02001462static int get_group_fd(struct evsel *evsel, int cpu, int thread)
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001463{
Jiri Olsa32dcd022019-07-21 13:23:51 +02001464 struct evsel *leader = evsel->leader;
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001465 int fd;
1466
Namhyung Kim823254e2012-11-29 15:38:30 +09001467 if (perf_evsel__is_group_leader(evsel))
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001468 return -1;
1469
1470 /*
1471	 * The leader must already be processed/open;
1472	 * if not, it's a bug.
1473 */
Jiri Olsa9dfcb752019-07-21 13:24:45 +02001474 BUG_ON(!leader->core.fd);
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001475
1476 fd = FD(leader, cpu, thread);
1477 BUG_ON(fd == -1);
1478
1479 return fd;
1480}
1481
Jiri Olsa32dcd022019-07-21 13:23:51 +02001482static void perf_evsel__remove_fd(struct evsel *pos,
Mengting Zhangca800062017-12-13 15:01:53 +08001483 int nr_cpus, int nr_threads,
1484 int thread_idx)
1485{
1486 for (int cpu = 0; cpu < nr_cpus; cpu++)
1487 for (int thread = thread_idx; thread < nr_threads - 1; thread++)
1488 FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
1489}
1490
Jiri Olsa32dcd022019-07-21 13:23:51 +02001491static int update_fds(struct evsel *evsel,
Mengting Zhangca800062017-12-13 15:01:53 +08001492 int nr_cpus, int cpu_idx,
1493 int nr_threads, int thread_idx)
1494{
Jiri Olsa32dcd022019-07-21 13:23:51 +02001495 struct evsel *pos;
Mengting Zhangca800062017-12-13 15:01:53 +08001496
1497 if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
1498 return -EINVAL;
1499
1500 evlist__for_each_entry(evsel->evlist, pos) {
1501 nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
1502
1503 perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
1504
1505 /*
1506		 * Since the fds for the next evsel have not been created yet,
1507		 * there is no need to iterate over the whole event list.
1508 */
1509 if (pos == evsel)
1510 break;
1511 }
1512 return 0;
1513}
1514
Jiri Olsa32dcd022019-07-21 13:23:51 +02001515static bool ignore_missing_thread(struct evsel *evsel,
Mengting Zhangca800062017-12-13 15:01:53 +08001516 int nr_cpus, int cpu,
Jiri Olsa9749b902019-07-21 13:23:50 +02001517 struct perf_thread_map *threads,
Jiri Olsaa359c172016-12-13 08:46:22 +01001518 int thread, int err)
1519{
Jiri Olsaa2f354e2019-08-22 13:11:41 +02001520 pid_t ignore_pid = perf_thread_map__pid(threads, thread);
Mengting Zhangca800062017-12-13 15:01:53 +08001521
Jiri Olsaa359c172016-12-13 08:46:22 +01001522 if (!evsel->ignore_missing_thread)
1523 return false;
1524
1525	/* The system-wide setup does not work with threads. */
Jiri Olsa648b5af2019-08-06 11:35:19 +02001526 if (evsel->core.system_wide)
Jiri Olsaa359c172016-12-13 08:46:22 +01001527 return false;
1528
1529	/* -ESRCH is the perf event syscall errno for pids that are not found. */
1530 if (err != -ESRCH)
1531 return false;
1532
1533 /* If there's only one thread, let it fail. */
1534 if (threads->nr == 1)
1535 return false;
1536
Mengting Zhangca800062017-12-13 15:01:53 +08001537 /*
1538	 * Remove the fd for the missing thread first,
1539	 * because thread_map__remove() will decrease threads->nr.
1540 */
1541 if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
1542 return false;
1543
Jiri Olsaa359c172016-12-13 08:46:22 +01001544 if (thread_map__remove(threads, thread))
1545 return false;
1546
1547 pr_warning("WARNING: Ignored open failure for pid %d\n",
Mengting Zhangca800062017-12-13 15:01:53 +08001548 ignore_pid);
Jiri Olsaa359c172016-12-13 08:46:22 +01001549 return true;
1550}
1551
Arnaldo Carvalho de Meloca125272019-09-24 15:41:51 -03001552static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
1553 void *priv __maybe_unused)
1554{
1555 return fprintf(fp, " %-32s %s\n", name, val);
1556}
1557
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001558static void display_attr(struct perf_event_attr *attr)
1559{
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301560 if (verbose >= 2 || debug_peo_args) {
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001561 fprintf(stderr, "%.60s\n", graph_dotted_line);
1562 fprintf(stderr, "perf_event_attr:\n");
1563 perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
1564 fprintf(stderr, "%.60s\n", graph_dotted_line);
1565 }
1566}
1567
Jiri Olsa32dcd022019-07-21 13:23:51 +02001568static int perf_event_open(struct evsel *evsel,
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001569 pid_t pid, int cpu, int group_fd,
1570 unsigned long flags)
1571{
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001572 int precise_ip = evsel->core.attr.precise_ip;
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001573 int fd;
1574
1575 while (1) {
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301576 pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001577 pid, cpu, group_fd, flags);
1578
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001579 fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, flags);
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001580 if (fd >= 0)
1581 break;
1582
Jiri Olsacd136182019-07-03 10:09:49 +02001583 /* Do not try less precise if not requested. */
1584 if (!evsel->precise_max)
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001585 break;
1586
1587 /*
1588 * We tried all the precise_ip values, and it's
1589 * still failing, so leave it to standard fallback.
1590 */
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001591 if (!evsel->core.attr.precise_ip) {
1592 evsel->core.attr.precise_ip = precise_ip;
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001593 break;
1594 }
1595
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301596 pr_debug2_peo("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001597 evsel->core.attr.precise_ip--;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301598 pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001599 display_attr(&evsel->core.attr);
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001600 }
1601
1602 return fd;
1603}
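/*
 * Illustrative walk-through: with evsel->precise_max, an event that
 * asked for precise_ip = 3 and got rejected is retried with 2, 1 and
 * finally 0; if even 0 fails, the original precise_ip is restored and
 * the error is left to the regular fallback handling in the caller.
 */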
1604
Andi Kleen4804e012019-11-20 16:15:19 -08001605static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
1606 struct perf_thread_map *threads,
1607 int start_cpu, int end_cpu)
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02001608{
Adrian Hunterbf8e8f42014-07-31 09:00:51 +03001609 int cpu, thread, nthreads;
Yann Droneaud57480d22014-06-30 22:28:47 +02001610 unsigned long flags = PERF_FLAG_FD_CLOEXEC;
Andi Kleen796c01a2019-10-20 10:51:54 -07001611 int pid = -1, err, old_errno;
Andi Kleenbec19672013-08-04 19:41:26 -07001612 enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02001613
Arnaldo Carvalho de Meloacb9f2d2019-08-13 11:06:38 -03001614 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
1615 (perf_missing_features.aux_output && evsel->core.attr.aux_output))
Arnaldo Carvalho de Melo32a951b2016-07-14 08:34:33 +00001616 return -EINVAL;
1617
Arnaldo Carvalho de Meloc24ae6d2017-02-14 10:59:04 -03001618 if (cpus == NULL) {
Jiri Olsaf8548392019-07-21 13:23:49 +02001619 static struct perf_cpu_map *empty_cpu_map;
Arnaldo Carvalho de Meloc24ae6d2017-02-14 10:59:04 -03001620
1621 if (empty_cpu_map == NULL) {
Jiri Olsa397721e2019-07-21 13:24:16 +02001622 empty_cpu_map = perf_cpu_map__dummy_new();
Arnaldo Carvalho de Meloc24ae6d2017-02-14 10:59:04 -03001623 if (empty_cpu_map == NULL)
1624 return -ENOMEM;
1625 }
1626
1627 cpus = empty_cpu_map;
1628 }
1629
1630 if (threads == NULL) {
Jiri Olsa9749b902019-07-21 13:23:50 +02001631 static struct perf_thread_map *empty_thread_map;
Arnaldo Carvalho de Meloc24ae6d2017-02-14 10:59:04 -03001632
1633 if (empty_thread_map == NULL) {
1634 empty_thread_map = thread_map__new_by_tid(-1);
1635 if (empty_thread_map == NULL)
1636 return -ENOMEM;
1637 }
1638
1639 threads = empty_thread_map;
1640 }
1641
Jiri Olsa648b5af2019-08-06 11:35:19 +02001642 if (evsel->core.system_wide)
Adrian Hunterbf8e8f42014-07-31 09:00:51 +03001643 nthreads = 1;
1644 else
1645 nthreads = threads->nr;
1646
Jiri Olsa9dfcb752019-07-21 13:24:45 +02001647 if (evsel->core.fd == NULL &&
Jiri Olsab8eca4d2019-07-21 13:24:48 +02001648 perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001649 return -ENOMEM;
Arnaldo Carvalho de Melo4eed11d2011-01-04 00:13:17 -02001650
Stephane Eranian023695d2011-02-14 11:20:01 +02001651 if (evsel->cgrp) {
Yann Droneaud57480d22014-06-30 22:28:47 +02001652 flags |= PERF_FLAG_PID_CGROUP;
Stephane Eranian023695d2011-02-14 11:20:01 +02001653 pid = evsel->cgrp->fd;
1654 }
1655
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001656fallback_missing_features:
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001657 if (perf_missing_features.clockid_wrong)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001658 evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001659 if (perf_missing_features.clockid) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001660 evsel->core.attr.use_clockid = 0;
1661 evsel->core.attr.clockid = 0;
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001662 }
Yann Droneaud57480d22014-06-30 22:28:47 +02001663 if (perf_missing_features.cloexec)
1664 flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
Stephane Eranian5c5e8542013-08-21 12:10:25 +02001665 if (perf_missing_features.mmap2)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001666 evsel->core.attr.mmap2 = 0;
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001667 if (perf_missing_features.exclude_guest)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001668 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
Andi Kleenbd0f8892015-12-11 16:12:24 -08001669 if (perf_missing_features.lbr_flags)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001670 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
Andi Kleenbd0f8892015-12-11 16:12:24 -08001671 PERF_SAMPLE_BRANCH_NO_CYCLES);
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001672 if (perf_missing_features.group_read && evsel->core.attr.inherit)
1673 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
Song Liu9aa0bfa2019-01-17 08:15:17 -08001674 if (perf_missing_features.ksymbol)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001675 evsel->core.attr.ksymbol = 0;
Arnaldo Carvalho de Melo74a1e862019-08-26 19:31:06 -03001676 if (perf_missing_features.bpf)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001677 evsel->core.attr.bpf_event = 0;
Kan Liangd3f85432020-02-28 08:30:01 -08001678 if (perf_missing_features.branch_hw_idx)
1679 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX;
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001680retry_sample_id:
1681 if (perf_missing_features.sample_id_all)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001682 evsel->core.attr.sample_id_all = 0;
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001683
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001684 display_attr(&evsel->core.attr);
Adrian Huntere3e1a542013-08-14 15:48:24 +03001685
Andi Kleen4804e012019-11-20 16:15:19 -08001686 for (cpu = start_cpu; cpu < end_cpu; cpu++) {
Arnaldo Carvalho de Melo9d04f172011-01-12 00:08:18 -02001687
Adrian Hunterbf8e8f42014-07-31 09:00:51 +03001688 for (thread = 0; thread < nthreads; thread++) {
Jiri Olsa83c2e4f32016-12-12 11:35:40 +01001689 int fd, group_fd;
Stephane Eranian023695d2011-02-14 11:20:01 +02001690
Jiri Olsa648b5af2019-08-06 11:35:19 +02001691 if (!evsel->cgrp && !evsel->core.system_wide)
Jiri Olsaa2f354e2019-08-22 13:11:41 +02001692 pid = perf_thread_map__pid(threads, thread);
Stephane Eranian023695d2011-02-14 11:20:01 +02001693
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001694 group_fd = get_group_fd(evsel, cpu, thread);
Andi Kleenbec19672013-08-04 19:41:26 -07001695retry_open:
Jiri Olsa10213e22017-07-03 16:50:18 +02001696 test_attr__ready();
1697
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001698 fd = perf_event_open(evsel, pid, cpus->map[cpu],
1699 group_fd, flags);
Jiri Olsa83c2e4f32016-12-12 11:35:40 +01001700
1701 FD(evsel, cpu, thread) = fd;
1702
1703 if (fd < 0) {
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001704 err = -errno;
Jiri Olsaa359c172016-12-13 08:46:22 +01001705
Mengting Zhangca800062017-12-13 15:01:53 +08001706 if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
Jiri Olsaa359c172016-12-13 08:46:22 +01001707 /*
1708 * We just removed 1 thread, so take a step
1709 * back on thread index and lower the upper
1710 * nthreads limit.
1711 */
1712 nthreads--;
1713 thread--;
1714
1715				/* ... and pretend that nothing has happened. */
1716 err = 0;
1717 continue;
1718 }
1719
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301720 pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
Adrian Hunterf852fd62013-11-01 15:51:29 +02001721 err);
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001722 goto try_fallback;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001723 }
Wang Nan1f45b1d2015-10-14 12:41:18 +00001724
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301725 pr_debug2_peo(" = %d\n", fd);
Jiri Olsa7b4b82bc2016-11-21 22:33:26 +01001726
Wang Nan1f45b1d2015-10-14 12:41:18 +00001727 if (evsel->bpf_fd >= 0) {
Jiri Olsa83c2e4f32016-12-12 11:35:40 +01001728 int evt_fd = fd;
Wang Nan1f45b1d2015-10-14 12:41:18 +00001729 int bpf_fd = evsel->bpf_fd;
1730
1731 err = ioctl(evt_fd,
1732 PERF_EVENT_IOC_SET_BPF,
1733 bpf_fd);
1734 if (err && errno != EEXIST) {
1735 pr_err("failed to attach bpf fd %d: %s\n",
1736 bpf_fd, strerror(errno));
1737 err = -EINVAL;
1738 goto out_close;
1739 }
1740 }
1741
Andi Kleenbec19672013-08-04 19:41:26 -07001742 set_rlimit = NO_CHANGE;
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001743
1744 /*
1745 * If we succeeded but had to kill clockid, fail and
1746 * have perf_evsel__open_strerror() print us a nice
1747 * error.
1748 */
1749 if (perf_missing_features.clockid ||
1750 perf_missing_features.clockid_wrong) {
1751 err = -EINVAL;
1752 goto out_close;
1753 }
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -02001754 }
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02001755 }
1756
1757 return 0;
1758
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001759try_fallback:
Andi Kleenbec19672013-08-04 19:41:26 -07001760 /*
1761 * perf stat needs between 5 and 22 fds per CPU. When we run out
1762	 * of them, try to increase the limits.
1763 */
1764 if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
1765 struct rlimit l;
Andi Kleenbec19672013-08-04 19:41:26 -07001766
Andi Kleen796c01a2019-10-20 10:51:54 -07001767 old_errno = errno;
Andi Kleenbec19672013-08-04 19:41:26 -07001768 if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
1769 if (set_rlimit == NO_CHANGE)
1770 l.rlim_cur = l.rlim_max;
1771 else {
1772 l.rlim_cur = l.rlim_max + 1000;
1773 l.rlim_max = l.rlim_cur;
1774 }
1775 if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
1776 set_rlimit++;
1777 errno = old_errno;
1778 goto retry_open;
1779 }
1780 }
1781 errno = old_errno;
1782 }
1783
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001784 if (err != -EINVAL || cpu > 0 || thread > 0)
1785 goto out_close;
1786
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001787 /*
1788 * Must probe features in the order they were added to the
1789 * perf_event_attr interface.
1790 */
Kan Liangd3f85432020-02-28 08:30:01 -08001791 if (!perf_missing_features.branch_hw_idx &&
1792 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) {
1793 perf_missing_features.branch_hw_idx = true;
1794 pr_debug2("switching off branch HW index support\n");
1795 goto fallback_missing_features;
1796 } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
Arnaldo Carvalho de Meloacb9f2d2019-08-13 11:06:38 -03001797 perf_missing_features.aux_output = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301798 pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n");
Arnaldo Carvalho de Meloacb9f2d2019-08-13 11:06:38 -03001799 goto out_close;
Arnaldo Carvalho de Melo74a1e862019-08-26 19:31:06 -03001800 } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) {
1801 perf_missing_features.bpf = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301802 pr_debug2_peo("switching off bpf_event\n");
Song Liu45178a92019-01-17 08:15:18 -08001803 goto fallback_missing_features;
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001804 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
Song Liu9aa0bfa2019-01-17 08:15:17 -08001805 perf_missing_features.ksymbol = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301806 pr_debug2_peo("switching off ksymbol\n");
Song Liu9aa0bfa2019-01-17 08:15:17 -08001807 goto fallback_missing_features;
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001808 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
Arnaldo Carvalho de Melo7da36e92016-06-20 10:47:18 +00001809 perf_missing_features.write_backward = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301810 pr_debug2_peo("switching off write_backward\n");
Arnaldo Carvalho de Melo32a951b2016-07-14 08:34:33 +00001811 goto out_close;
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001812 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001813 perf_missing_features.clockid_wrong = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301814 pr_debug2_peo("switching off clockid\n");
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001815 goto fallback_missing_features;
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001816 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001817 perf_missing_features.clockid = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301818 pr_debug2_peo("switching off use_clockid\n");
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001819 goto fallback_missing_features;
1820 } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
Yann Droneaud57480d22014-06-30 22:28:47 +02001821 perf_missing_features.cloexec = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301822 pr_debug2_peo("switching off cloexec flag\n");
Yann Droneaud57480d22014-06-30 22:28:47 +02001823 goto fallback_missing_features;
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001824 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
Stephane Eranian5c5e8542013-08-21 12:10:25 +02001825 perf_missing_features.mmap2 = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301826 pr_debug2_peo("switching off mmap2\n");
Stephane Eranian5c5e8542013-08-21 12:10:25 +02001827 goto fallback_missing_features;
1828 } else if (!perf_missing_features.exclude_guest &&
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001829 (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host)) {
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001830 perf_missing_features.exclude_guest = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301831 pr_debug2_peo("switching off exclude_guest, exclude_host\n");
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001832 goto fallback_missing_features;
1833 } else if (!perf_missing_features.sample_id_all) {
1834 perf_missing_features.sample_id_all = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301835 pr_debug2_peo("switching off sample_id_all\n");
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001836 goto retry_sample_id;
Andi Kleenbd0f8892015-12-11 16:12:24 -08001837 } else if (!perf_missing_features.lbr_flags &&
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001838 (evsel->core.attr.branch_sample_type &
Andi Kleenbd0f8892015-12-11 16:12:24 -08001839 (PERF_SAMPLE_BRANCH_NO_CYCLES |
1840 PERF_SAMPLE_BRANCH_NO_FLAGS))) {
1841 perf_missing_features.lbr_flags = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301842 pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");
Andi Kleenbd0f8892015-12-11 16:12:24 -08001843 goto fallback_missing_features;
Jiri Olsa82bf3112017-07-26 14:02:06 +02001844 } else if (!perf_missing_features.group_read &&
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001845 evsel->core.attr.inherit &&
1846 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
Kan Liang121f3252018-04-24 11:20:12 -07001847 perf_evsel__is_group_leader(evsel)) {
Jiri Olsa82bf3112017-07-26 14:02:06 +02001848 perf_missing_features.group_read = true;
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301849 pr_debug2_peo("switching off group read\n");
Jiri Olsa82bf3112017-07-26 14:02:06 +02001850 goto fallback_missing_features;
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001851 }
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02001852out_close:
Jin Yaoab6c79b2018-01-16 23:43:08 +08001853 if (err)
1854 threads->err_thread = thread;
1855
Andi Kleen796c01a2019-10-20 10:51:54 -07001856 old_errno = errno;
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -02001857 do {
1858 while (--thread >= 0) {
Andi Kleen2ccfb8b2019-10-20 10:51:55 -07001859 if (FD(evsel, cpu, thread) >= 0)
1860 close(FD(evsel, cpu, thread));
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -02001861 FD(evsel, cpu, thread) = -1;
1862 }
Adrian Hunterbf8e8f42014-07-31 09:00:51 +03001863 thread = nthreads;
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -02001864 } while (--cpu >= 0);
Andi Kleen796c01a2019-10-20 10:51:54 -07001865 errno = old_errno;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001866 return err;
1867}
1868
Andi Kleen4804e012019-11-20 16:15:19 -08001869int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
1870 struct perf_thread_map *threads)
1871{
1872 return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1);
1873}
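/*
 * Illustrative example (sbuf being a local buffer): opening an event on
 * every CPU in @cpus for the threads in @threads, with a negative errno
 * reported on failure:
 *
 *	int err = evsel__open(evsel, cpus, threads);
 *
 *	if (err < 0)
 *		pr_err("failed to open: %s\n",
 *		       str_error_r(-err, sbuf, sizeof(sbuf)));
 */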
1874
Jiri Olsa88761fa2019-07-21 13:24:50 +02001875void evsel__close(struct evsel *evsel)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001876{
Jiri Olsa88761fa2019-07-21 13:24:50 +02001877 perf_evsel__close(&evsel->core);
Jiri Olsa70c20362019-09-03 10:34:29 +02001878 perf_evsel__free_id(&evsel->core);
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02001879}
1880
Jiri Olsa32dcd022019-07-21 13:23:51 +02001881int perf_evsel__open_per_cpu(struct evsel *evsel,
Andi Kleen4804e012019-11-20 16:15:19 -08001882 struct perf_cpu_map *cpus,
1883 int cpu)
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -02001884{
Andi Kleen4804e012019-11-20 16:15:19 -08001885 if (cpu == -1)
1886 return evsel__open_cpu(evsel, cpus, NULL, 0,
1887 cpus ? cpus->nr : 1);
1888
1889 return evsel__open_cpu(evsel, cpus, NULL, cpu, cpu + 1);
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -02001890}
1891
Jiri Olsa32dcd022019-07-21 13:23:51 +02001892int perf_evsel__open_per_thread(struct evsel *evsel,
Jiri Olsa9749b902019-07-21 13:23:50 +02001893 struct perf_thread_map *threads)
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02001894{
Jiri Olsa5972d1e2019-07-21 13:24:01 +02001895 return evsel__open(evsel, NULL, threads);
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02001896}
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -02001897
Jiri Olsa32dcd022019-07-21 13:23:51 +02001898static int perf_evsel__parse_id_sample(const struct evsel *evsel,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001899 const union perf_event *event,
1900 struct perf_sample *sample)
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02001901{
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001902 u64 type = evsel->core.attr.sample_type;
Jiri Olsab1fcd192019-08-25 20:17:52 +02001903 const __u64 *array = event->sample.array;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001904 bool swapped = evsel->needs_swap;
Jiri Olsa37073f92012-05-30 14:23:44 +02001905 union u64_swap u;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02001906
1907 array += ((event->header.size -
1908 sizeof(event->header)) / sizeof(u64)) - 1;
1909
Adrian Hunter75562572013-08-27 11:23:09 +03001910 if (type & PERF_SAMPLE_IDENTIFIER) {
1911 sample->id = *array;
1912 array--;
1913 }
1914
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02001915 if (type & PERF_SAMPLE_CPU) {
Jiri Olsa37073f92012-05-30 14:23:44 +02001916 u.val64 = *array;
1917 if (swapped) {
1918 /* undo swap of u64, then swap on individual u32s */
1919 u.val64 = bswap_64(u.val64);
1920 u.val32[0] = bswap_32(u.val32[0]);
1921 }
1922
1923 sample->cpu = u.val32[0];
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02001924 array--;
1925 }
1926
1927 if (type & PERF_SAMPLE_STREAM_ID) {
1928 sample->stream_id = *array;
1929 array--;
1930 }
1931
1932 if (type & PERF_SAMPLE_ID) {
1933 sample->id = *array;
1934 array--;
1935 }
1936
1937 if (type & PERF_SAMPLE_TIME) {
1938 sample->time = *array;
1939 array--;
1940 }
1941
1942 if (type & PERF_SAMPLE_TID) {
Jiri Olsa37073f92012-05-30 14:23:44 +02001943 u.val64 = *array;
1944 if (swapped) {
1945 /* undo swap of u64, then swap on individual u32s */
1946 u.val64 = bswap_64(u.val64);
1947 u.val32[0] = bswap_32(u.val32[0]);
1948 u.val32[1] = bswap_32(u.val32[1]);
1949 }
1950
1951 sample->pid = u.val32[0];
1952 sample->tid = u.val32[1];
Adrian Hunterdd44bc62013-10-18 15:29:01 +03001953 array--;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02001954 }
1955
1956 return 0;
1957}
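/*
 * The parse above walks backwards because sample_id_all appends these
 * fields to the end of non-sample records in this (forward) order:
 *
 *	struct sample_id {
 *		{ u32 pid, tid;  }	/* if PERF_SAMPLE_TID set */
 *		{ u64 time;      }	/* if PERF_SAMPLE_TIME set */
 *		{ u64 id;        }	/* if PERF_SAMPLE_ID set */
 *		{ u64 stream_id; }	/* if PERF_SAMPLE_STREAM_ID set */
 *		{ u32 cpu, res;  }	/* if PERF_SAMPLE_CPU set */
 *		{ u64 id;        }	/* if PERF_SAMPLE_IDENTIFIER set */
 *	};
 */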
1958
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001959static inline bool overflow(const void *endp, u16 max_size, const void *offset,
1960 u64 size)
Frederic Weisbecker98e1da92011-05-21 20:08:15 +02001961{
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001962 return size > max_size || offset + size > endp;
Frederic Weisbecker98e1da92011-05-21 20:08:15 +02001963}
1964
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001965#define OVERFLOW_CHECK(offset, size, max_size) \
1966 do { \
1967 if (overflow(endp, (max_size), (offset), (size))) \
1968 return -EFAULT; \
1969 } while (0)
1970
1971#define OVERFLOW_CHECK_u64(offset) \
1972 OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
1973
Jiri Olsa01468122017-08-03 13:10:28 +02001974static int
1975perf_event__check_size(union perf_event *event, unsigned int sample_size)
1976{
1977 /*
1978 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
1979 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
1980 * check the format does not go past the end of the event.
1981 */
1982 if (sample_size + sizeof(event->header) > event->header.size)
1983 return -EFAULT;
1984
1985 return 0;
1986}
1987
Jiri Olsa32dcd022019-07-21 13:23:51 +02001988int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001989 struct perf_sample *data)
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02001990{
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001991 u64 type = evsel->core.attr.sample_type;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001992 bool swapped = evsel->needs_swap;
Jiri Olsab1fcd192019-08-25 20:17:52 +02001993 const __u64 *array;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001994 u16 max_size = event->header.size;
1995 const void *endp = (void *)event + max_size;
1996 u64 sz;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02001997
David Ahern936be502011-09-06 09:12:26 -06001998 /*
1999 * used for cross-endian analysis. See git commit 65014ab3
2000 * for why this goofiness is needed.
2001 */
Jiri Olsa6a11f922012-05-16 08:59:04 +02002002 union u64_swap u;
David Ahern936be502011-09-06 09:12:26 -06002003
Robert Richterf3bda2c2011-12-15 17:32:39 +01002004 memset(data, 0, sizeof(*data));
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002005 data->cpu = data->pid = data->tid = -1;
2006 data->stream_id = data->id = data->time = -1ULL;
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002007 data->period = evsel->core.attr.sample_period;
Arnaldo Carvalho de Melo473398a2016-03-22 18:23:43 -03002008 data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
Jiri Olsa28a0b392018-01-07 17:03:52 +01002009 data->misc = event->header.misc;
Jiri Olsa3ad31d82017-08-03 16:07:05 +02002010 data->id = -1ULL;
2011 data->data_src = PERF_MEM_DATA_SRC_NONE;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002012
2013 if (event->header.type != PERF_RECORD_SAMPLE) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002014 if (!evsel->core.attr.sample_id_all)
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002015 return 0;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03002016 return perf_evsel__parse_id_sample(evsel, event, data);
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002017 }
2018
2019 array = event->sample.array;
2020
Jiri Olsa01468122017-08-03 13:10:28 +02002021 if (perf_event__check_size(event, evsel->sample_size))
Frederic Weisbeckera2854122011-05-21 19:33:04 +02002022 return -EFAULT;
2023
Adrian Hunter75562572013-08-27 11:23:09 +03002024 if (type & PERF_SAMPLE_IDENTIFIER) {
2025 data->id = *array;
2026 array++;
2027 }
2028
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002029 if (type & PERF_SAMPLE_IP) {
Adrian Hunteref893252013-08-27 11:23:06 +03002030 data->ip = *array;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002031 array++;
2032 }
2033
2034 if (type & PERF_SAMPLE_TID) {
David Ahern936be502011-09-06 09:12:26 -06002035 u.val64 = *array;
2036 if (swapped) {
2037 /* undo swap of u64, then swap on individual u32s */
2038 u.val64 = bswap_64(u.val64);
2039 u.val32[0] = bswap_32(u.val32[0]);
2040 u.val32[1] = bswap_32(u.val32[1]);
2041 }
2042
2043 data->pid = u.val32[0];
2044 data->tid = u.val32[1];
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002045 array++;
2046 }
2047
2048 if (type & PERF_SAMPLE_TIME) {
2049 data->time = *array;
2050 array++;
2051 }
2052
2053 if (type & PERF_SAMPLE_ADDR) {
2054 data->addr = *array;
2055 array++;
2056 }
2057
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002058 if (type & PERF_SAMPLE_ID) {
2059 data->id = *array;
2060 array++;
2061 }
2062
2063 if (type & PERF_SAMPLE_STREAM_ID) {
2064 data->stream_id = *array;
2065 array++;
2066 }
2067
2068 if (type & PERF_SAMPLE_CPU) {
David Ahern936be502011-09-06 09:12:26 -06002069
2070 u.val64 = *array;
2071 if (swapped) {
2072 /* undo swap of u64, then swap on individual u32s */
2073 u.val64 = bswap_64(u.val64);
2074 u.val32[0] = bswap_32(u.val32[0]);
2075 }
2076
2077 data->cpu = u.val32[0];
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002078 array++;
2079 }
2080
2081 if (type & PERF_SAMPLE_PERIOD) {
2082 data->period = *array;
2083 array++;
2084 }
2085
2086 if (type & PERF_SAMPLE_READ) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002087 u64 read_format = evsel->core.attr.read_format;
Jiri Olsa9ede4732012-10-10 17:38:13 +02002088
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002089 OVERFLOW_CHECK_u64(array);
Jiri Olsa9ede4732012-10-10 17:38:13 +02002090 if (read_format & PERF_FORMAT_GROUP)
2091 data->read.group.nr = *array;
2092 else
2093 data->read.one.value = *array;
2094
2095 array++;
2096
2097 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002098 OVERFLOW_CHECK_u64(array);
Jiri Olsa9ede4732012-10-10 17:38:13 +02002099 data->read.time_enabled = *array;
2100 array++;
2101 }
2102
2103 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002104 OVERFLOW_CHECK_u64(array);
Jiri Olsa9ede4732012-10-10 17:38:13 +02002105 data->read.time_running = *array;
2106 array++;
2107 }
2108
2109 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2110 if (read_format & PERF_FORMAT_GROUP) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002111 const u64 max_group_nr = UINT64_MAX /
2112 sizeof(struct sample_read_value);
2113
2114 if (data->read.group.nr > max_group_nr)
2115 return -EFAULT;
2116 sz = data->read.group.nr *
2117 sizeof(struct sample_read_value);
2118 OVERFLOW_CHECK(array, sz, max_size);
2119 data->read.group.values =
2120 (struct sample_read_value *)array;
2121 array = (void *)array + sz;
Jiri Olsa9ede4732012-10-10 17:38:13 +02002122 } else {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002123 OVERFLOW_CHECK_u64(array);
Jiri Olsa9ede4732012-10-10 17:38:13 +02002124 data->read.one.id = *array;
2125 array++;
2126 }
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002127 }
2128
Arnaldo Carvalho de Melo27de9b22018-05-28 16:00:29 -03002129 if (evsel__has_callchain(evsel)) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002130 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
2131
2132 OVERFLOW_CHECK_u64(array);
2133 data->callchain = (struct ip_callchain *)array++;
2134 if (data->callchain->nr > max_callchain_nr)
Frederic Weisbecker98e1da92011-05-21 20:08:15 +02002135 return -EFAULT;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002136 sz = data->callchain->nr * sizeof(u64);
2137 OVERFLOW_CHECK(array, sz, max_size);
2138 array = (void *)array + sz;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002139 }
2140
2141 if (type & PERF_SAMPLE_RAW) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002142 OVERFLOW_CHECK_u64(array);
David Ahern936be502011-09-06 09:12:26 -06002143 u.val64 = *array;
Jiri Olsaf9d8adb2017-11-29 19:43:46 +01002144
2145 /*
2146 * Undo swap of u64, then swap on individual u32s,
2147 * get the size of the raw area and undo all of the
2148		 * swap. The pevent interface handles endianness
2149		 * by itself.
2150 */
2151 if (swapped) {
David Ahern936be502011-09-06 09:12:26 -06002152 u.val64 = bswap_64(u.val64);
2153 u.val32[0] = bswap_32(u.val32[0]);
2154 u.val32[1] = bswap_32(u.val32[1]);
2155 }
David Ahern936be502011-09-06 09:12:26 -06002156 data->raw_size = u.val32[0];
Jiri Olsaf9d8adb2017-11-29 19:43:46 +01002157
2158 /*
2159 * The raw data is aligned on 64bits including the
2160 * u32 size, so it's safe to use mem_bswap_64.
2161 */
2162 if (swapped)
2163 mem_bswap_64((void *) array, data->raw_size);
2164
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002165 array = (void *)array + sizeof(u32);
Frederic Weisbecker98e1da92011-05-21 20:08:15 +02002166
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002167 OVERFLOW_CHECK(array, data->raw_size, max_size);
2168 data->raw_data = (void *)array;
2169 array = (void *)array + data->raw_size;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002170 }
2171
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01002172 if (type & PERF_SAMPLE_BRANCH_STACK) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002173 const u64 max_branch_nr = UINT64_MAX /
2174 sizeof(struct branch_entry);
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01002175
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002176 OVERFLOW_CHECK_u64(array);
2177 data->branch_stack = (struct branch_stack *)array++;
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01002178
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002179 if (data->branch_stack->nr > max_branch_nr)
2180 return -EFAULT;
Kan Liang42bbabe2020-02-28 08:30:00 -08002181
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01002182 sz = data->branch_stack->nr * sizeof(struct branch_entry);
Kan Liang42bbabe2020-02-28 08:30:00 -08002183 if (perf_evsel__has_branch_hw_idx(evsel))
2184 sz += sizeof(u64);
2185 else
2186 data->no_hw_idx = true;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002187 OVERFLOW_CHECK(array, sz, max_size);
2188 array = (void *)array + sz;
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01002189 }
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002190
2191 if (type & PERF_SAMPLE_REGS_USER) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002192 OVERFLOW_CHECK_u64(array);
Adrian Hunter5b95a4a32013-08-27 11:23:10 +03002193 data->user_regs.abi = *array;
2194 array++;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002195
Adrian Hunter5b95a4a32013-08-27 11:23:10 +03002196 if (data->user_regs.abi) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002197 u64 mask = evsel->core.attr.sample_regs_user;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002198
Mao Han3a5b64f2019-04-10 16:16:43 +08002199 sz = hweight64(mask) * sizeof(u64);
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002200 OVERFLOW_CHECK(array, sz, max_size);
Jiri Olsa352ea452014-01-07 13:47:25 +01002201 data->user_regs.mask = mask;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002202 data->user_regs.regs = (u64 *)array;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002203 array = (void *)array + sz;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002204 }
2205 }
2206
2207 if (type & PERF_SAMPLE_STACK_USER) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002208 OVERFLOW_CHECK_u64(array);
2209 sz = *array++;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002210
2211 data->user_stack.offset = ((char *)(array - 1)
2212 - (char *) event);
2213
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002214 if (!sz) {
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002215 data->user_stack.size = 0;
2216 } else {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002217 OVERFLOW_CHECK(array, sz, max_size);
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002218 data->user_stack.data = (char *)array;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002219 array = (void *)array + sz;
2220 OVERFLOW_CHECK_u64(array);
Adrian Hunter54bd2692013-07-04 16:20:34 +03002221 data->user_stack.size = *array++;
Jiri Olsaa65cb4b2013-10-02 15:46:39 +02002222 if (WARN_ONCE(data->user_stack.size > sz,
2223 "user stack dump failure\n"))
2224 return -EFAULT;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002225 }
2226 }
2227
Andi Kleen05484292013-01-24 16:10:29 +01002228 if (type & PERF_SAMPLE_WEIGHT) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002229 OVERFLOW_CHECK_u64(array);
Andi Kleen05484292013-01-24 16:10:29 +01002230 data->weight = *array;
2231 array++;
2232 }
2233
Stephane Eranian98a3b322013-01-24 16:10:35 +01002234 if (type & PERF_SAMPLE_DATA_SRC) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002235 OVERFLOW_CHECK_u64(array);
Stephane Eranian98a3b322013-01-24 16:10:35 +01002236 data->data_src = *array;
2237 array++;
2238 }
2239
Andi Kleen475eeab2013-09-20 07:40:43 -07002240 if (type & PERF_SAMPLE_TRANSACTION) {
Adrian Hunter87b95522013-11-01 15:51:36 +02002241 OVERFLOW_CHECK_u64(array);
Andi Kleen475eeab2013-09-20 07:40:43 -07002242 data->transaction = *array;
2243 array++;
2244 }
2245
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02002246 data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
2247 if (type & PERF_SAMPLE_REGS_INTR) {
2248 OVERFLOW_CHECK_u64(array);
2249 data->intr_regs.abi = *array;
2250 array++;
2251
2252 if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002253 u64 mask = evsel->core.attr.sample_regs_intr;
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02002254
Mao Han3a5b64f2019-04-10 16:16:43 +08002255 sz = hweight64(mask) * sizeof(u64);
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02002256 OVERFLOW_CHECK(array, sz, max_size);
2257 data->intr_regs.mask = mask;
2258 data->intr_regs.regs = (u64 *)array;
2259 array = (void *)array + sz;
2260 }
2261 }
2262
Kan Liang3b0a5da2017-08-29 13:11:08 -04002263 data->phys_addr = 0;
2264 if (type & PERF_SAMPLE_PHYS_ADDR) {
2265 data->phys_addr = *array;
2266 array++;
2267 }
2268
Adrian Hunter98dcf142019-11-15 14:42:11 +02002269 if (type & PERF_SAMPLE_AUX) {
2270 OVERFLOW_CHECK_u64(array);
2271 sz = *array++;
2272
2273 OVERFLOW_CHECK(array, sz, max_size);
2274 /* Undo swap of data */
2275 if (swapped)
2276 mem_bswap_64((char *)array, sz);
2277 data->aux_sample.size = sz;
2278 data->aux_sample.data = (char *)array;
2279 array = (void *)array + sz;
2280 }
2281
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002282 return 0;
2283}
Andrew Vagin74eec262011-11-28 12:03:31 +03002284
Jiri Olsa32dcd022019-07-21 13:23:51 +02002285int perf_evsel__parse_sample_timestamp(struct evsel *evsel,
Jiri Olsa01468122017-08-03 13:10:28 +02002286 union perf_event *event,
2287 u64 *timestamp)
2288{
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002289 u64 type = evsel->core.attr.sample_type;
Jiri Olsab1fcd192019-08-25 20:17:52 +02002290 const __u64 *array;
Jiri Olsa01468122017-08-03 13:10:28 +02002291
2292 if (!(type & PERF_SAMPLE_TIME))
2293 return -1;
2294
2295 if (event->header.type != PERF_RECORD_SAMPLE) {
2296 struct perf_sample data = {
2297 .time = -1ULL,
2298 };
2299
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002300 if (!evsel->core.attr.sample_id_all)
Jiri Olsa01468122017-08-03 13:10:28 +02002301 return -1;
2302 if (perf_evsel__parse_id_sample(evsel, event, &data))
2303 return -1;
2304
2305 *timestamp = data.time;
2306 return 0;
2307 }
2308
2309 array = event->sample.array;
2310
2311 if (perf_event__check_size(event, evsel->sample_size))
2312 return -EFAULT;
2313
2314 if (type & PERF_SAMPLE_IDENTIFIER)
2315 array++;
2316
2317 if (type & PERF_SAMPLE_IP)
2318 array++;
2319
2320 if (type & PERF_SAMPLE_TID)
2321 array++;
2322
2323 if (type & PERF_SAMPLE_TIME)
2324 *timestamp = *array;
2325
2326 return 0;
2327}
2328
Jiri Olsa32dcd022019-07-21 13:23:51 +02002329struct tep_format_field *perf_evsel__field(struct evsel *evsel, const char *name)
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03002330{
Tzvetomir Stoyanov (VMware)af85cd12018-08-08 14:02:50 -04002331 return tep_find_field(evsel->tp_format, name);
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03002332}
2333
Jiri Olsa32dcd022019-07-21 13:23:51 +02002334void *perf_evsel__rawptr(struct evsel *evsel, struct perf_sample *sample,
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002335 const char *name)
2336{
Tzvetomir Stoyanov (VMware)2c92f982018-09-19 14:56:45 -04002337 struct tep_format_field *field = perf_evsel__field(evsel, name);
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002338 int offset;
2339
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03002340 if (!field)
2341 return NULL;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002342
2343 offset = field->offset;
2344
Tzvetomir Stoyanov (VMware)bb39ccb2018-09-19 14:56:46 -04002345 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002346 offset = *(int *)(sample->raw_data + field->offset);
2347 offset &= 0xffff;
2348 }
2349
2350 return sample->raw_data + offset;
2351}
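/*
 * Illustrative example (hypothetical field name): fetching the payload
 * of a tracepoint string field:
 *
 *	const char *filename = perf_evsel__rawptr(evsel, sample, "filename");
 *
 * For dynamic (__data_loc) fields the u32 at field->offset packs the
 * payload offset in its low 16 bits, which the masking above extracts.
 */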
2352
Tzvetomir Stoyanov (VMware)2c92f982018-09-19 14:56:45 -04002353u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
Arnaldo Carvalho de Melo90525172016-05-31 12:47:46 -03002354 bool needs_swap)
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002355{
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03002356 u64 value;
Arnaldo Carvalho de Melo90525172016-05-31 12:47:46 -03002357 void *ptr = sample->raw_data + field->offset;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002358
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03002359 switch (field->size) {
2360 case 1:
2361 return *(u8 *)ptr;
2362 case 2:
2363 value = *(u16 *)ptr;
2364 break;
2365 case 4:
2366 value = *(u32 *)ptr;
2367 break;
2368 case 8:
David Aherne94eeda2015-03-24 16:14:09 -04002369 memcpy(&value, ptr, sizeof(u64));
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03002370 break;
2371 default:
2372 return 0;
2373 }
2374
Arnaldo Carvalho de Melo90525172016-05-31 12:47:46 -03002375 if (!needs_swap)
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03002376 return value;
2377
2378 switch (field->size) {
2379 case 2:
2380 return bswap_16(value);
2381 case 4:
2382 return bswap_32(value);
2383 case 8:
2384 return bswap_64(value);
2385 default:
2386 return 0;
2387 }
2388
2389 return 0;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002390}
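/*
 * Illustrative example (hypothetical value): a 4-byte field recorded on
 * an opposite-endian machine as 0x12345678 reads back here as
 * 0x78563412, so the bswap_32() above recovers 0x12345678 when
 * needs_swap is true.
 */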
Arnaldo Carvalho de Melo0698aed2012-12-10 18:17:08 -03002391
Jiri Olsa32dcd022019-07-21 13:23:51 +02002392u64 perf_evsel__intval(struct evsel *evsel, struct perf_sample *sample,
Arnaldo Carvalho de Melo90525172016-05-31 12:47:46 -03002393 const char *name)
2394{
Tzvetomir Stoyanov (VMware)2c92f982018-09-19 14:56:45 -04002395 struct tep_format_field *field = perf_evsel__field(evsel, name);
Arnaldo Carvalho de Melo90525172016-05-31 12:47:46 -03002396
2397 if (!field)
2398 return 0;
2399
2400 return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
2401}
2402
Jiri Olsa32dcd022019-07-21 13:23:51 +02002403bool perf_evsel__fallback(struct evsel *evsel, int err,
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002404 char *msg, size_t msgsize)
2405{
Arnaldo Carvalho de Melo08094822016-05-12 16:07:47 -03002406 int paranoid;
2407
David Ahern2b821cc2013-07-18 17:27:59 -06002408 if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002409 evsel->core.attr.type == PERF_TYPE_HARDWARE &&
2410 evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002411 /*
2412 * If it's cycles then fall back to hrtimer based
2413 * cpu-clock-tick sw counter, which is always available even if
2414 * no PMU support.
2415 *
2416 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
2417 * b0a873e).
2418 */
2419 scnprintf(msg, msgsize, "%s",
2420"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
2421
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002422 evsel->core.attr.type = PERF_TYPE_SOFTWARE;
2423 evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002424
Arnaldo Carvalho de Melo04662522013-12-26 17:41:15 -03002425 zfree(&evsel->name);
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002426 return true;
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002427 } else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
Arnaldo Carvalho de Melo08094822016-05-12 16:07:47 -03002428 (paranoid = perf_event_paranoid()) > 1) {
2429 const char *name = perf_evsel__name(evsel);
2430 char *new_name;
Jiri Olsa129193b2018-04-23 11:08:17 +02002431 const char *sep = ":";
Arnaldo Carvalho de Melo08094822016-05-12 16:07:47 -03002432
Jiri Olsa129193b2018-04-23 11:08:17 +02002433		/* Is the separator already in the name? */
2434 if (strchr(name, '/') ||
2435 strchr(name, ':'))
2436 sep = "";
2437
2438 if (asprintf(&new_name, "%s%su", name, sep) < 0)
Arnaldo Carvalho de Melo08094822016-05-12 16:07:47 -03002439 return false;
2440
2441 if (evsel->name)
2442 free(evsel->name);
2443 evsel->name = new_name;
Stephane Eranian4ec8d982019-09-20 16:03:56 -07002444 scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
2445 "to fall back to excluding kernel and hypervisor "
2446 " samples", paranoid);
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002447 evsel->core.attr.exclude_kernel = 1;
Stephane Eranian4ec8d982019-09-20 16:03:56 -07002448 evsel->core.attr.exclude_hv = 1;
Arnaldo Carvalho de Melo08094822016-05-12 16:07:47 -03002449
2450 return true;
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002451 }
2452
2453 return false;
2454}
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002455
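/*
 * Usage sketch: callers such as perf record drive the fallback as a
 * try/rewrite/retry loop. do_open is a stand-in for whatever actually
 * calls sys_perf_event_open() and returns -1 with errno set; it is an
 * assumption of this sketch, not an API from this file.
 */
static __maybe_unused int example_open_with_fallback(struct evsel *evsel,
						     int (*do_open)(struct evsel *))
{
	char msg[512];

	if (do_open(evsel) == 0)
		return 0;

	if (!perf_evsel__fallback(evsel, errno, msg, sizeof(msg)))
		return -1;

	pr_debug("%s\n", msg);	/* e.g. "... fall back to cpu-clock-ticks" */
	return do_open(evsel);
}
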
static bool find_process(const char *name)
{
	size_t len = strlen(name);
	DIR *dir;
	struct dirent *d;
	int ret = -1;

	dir = opendir(procfs__mountpoint());
	if (!dir)
		return false;

	/* Walk through the directory. */
	while (ret && (d = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		char *data;
		size_t size;

		if ((d->d_type != DT_DIR) ||
		     !strcmp(".", d->d_name) ||
		     !strcmp("..", d->d_name))
			continue;

		scnprintf(path, sizeof(path), "%s/%s/comm",
			  procfs__mountpoint(), d->d_name);

		if (filename__read_str(path, &data, &size))
			continue;

		ret = strncmp(name, data, len);
		free(data);
	}

	closedir(dir);
	return ret == 0;
}

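/*
 * Note on the helper above: it matches against /proc/<pid>/comm, i.e.
 * the kernel-truncated thread name, not a full path. So e.g.
 * find_process("oprofiled") as used below works, while a path such as
 * "/usr/bin/oprofiled" never would.
 */
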
int perf_evsel__open_strerror(struct evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	int printed = 0;

	switch (err) {
	case EPERM:
	case EACCES:
		if (err == EPERM)
			printed = scnprintf(msg, size,
				"No permission to enable %s event.\n\n",
				perf_evsel__name(evsel));

		return scnprintf(msg + printed, size - printed,
		 "You may not have permission to collect %sstats.\n\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
		 "which controls use of the performance events system by\n"
		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
		 "The current value is %d:\n\n"
		 "  -1: Allow use of (almost) all events by all users\n"
		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
		 ">= 0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN\n"
		 "      Disallow raw tracepoint access by users without CAP_SYS_ADMIN\n"
		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
		 "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
		 "	kernel.perf_event_paranoid = -1\n",
				 target->system_wide ? "system-wide " : "",
				 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if (evsel__has_callchain(evsel) &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
					 "Not enough memory to setup event with callchain.\n"
					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
					 "Hint: Current value: %d", sysctl__max_stack());
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
			 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->core.attr.sample_period != 0)
			return scnprintf(msg, size,
	"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
					 perf_evsel__name(evsel));
		if (evsel->core.attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		if (perf_missing_features.aux_output)
			return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg | grep -i perf may provide additional information.\n",
			 err, str_error_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}

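/*
 * Usage sketch: a caller that failed to open an event can turn the
 * saved errno into a user-facing hint via the function above. The
 * buffer size and helper name here are illustrative.
 */
static __maybe_unused void example_report_open_error(struct evsel *evsel,
						     struct target *target,
						     int err)
{
	char msg[512];

	perf_evsel__open_strerror(evsel, target, err, msg, sizeof(msg));
	pr_err("%s\n", msg);
}
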
struct perf_env *perf_evsel__env(struct evsel *evsel)
{
	if (evsel && evsel->evlist)
		return evsel->evlist->env;
	return &perf_env;
}

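/*
 * Usage sketch: because perf_evsel__env() falls back to the global
 * perf_env, the result can be dereferenced without a NULL check. The
 * arch lookup is just one plausible illustration.
 */
static __maybe_unused const char *example_evsel_arch(struct evsel *evsel)
{
	return perf_evsel__env(evsel)->arch;
}
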
static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
{
	int cpu, thread;

	for (cpu = 0; cpu < xyarray__max_x(evsel->core.fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
		     thread++) {
			int fd = FD(evsel, cpu, thread);

			if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
						   cpu, thread, fd) < 0)
				return -1;
		}
	}

	return 0;
}

int perf_evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
{
	struct perf_cpu_map *cpus = evsel->core.cpus;
	struct perf_thread_map *threads = evsel->core.threads;

	if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr))
		return -ENOMEM;

	return store_evsel_ids(evsel, evlist);
}
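
/*
 * Usage sketch: storing IDs for every event in an evlist, the way a
 * header-writing path would before emitting perf.data. The helper name
 * is made up and the error handling is minimal.
 */
static __maybe_unused int example_store_all_ids(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		int err = perf_evsel__store_ids(evsel, evlist);

		if (err)
			return err;
	}

	return 0;
}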