// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <byteswap.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <perf/evsel.h>
#include "asm/bug.h"
#include "bpf_counter.h"
#include "callchain.h"
#include "cgroup.h"
#include "counts.h"
#include "event.h"
#include "evsel.h"
#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
#include "evlist.h"
#include <perf/cpumap.h>
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "record.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
#include "util.h"
#include "hashmap.h"
#include "pmu-hybrid.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include <internal/xyarray.h>
#include <internal/lib.h>

#include <linux/ctype.h>

struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
	return 0;
}

void __weak test_attr__ready(void) { }

static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct evsel *evsel);
	void	(*fini)(struct evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct evsel),
	.init = evsel__no_extra_init,
	.fini = evsel__no_extra_fini,
};

int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel),
			 void (*fini)(struct evsel *evsel))
{

	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}
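
/*
 * Illustrative sketch (not part of this file): a perf tool that embeds extra
 * per-evsel state can grow the allocation and hook init/fini roughly like:
 *
 *	struct my_evsel { struct evsel evsel; int extra; };
 *	evsel__object_config(sizeof(struct my_evsel), my_init, my_fini);
 *
 * 'struct my_evsel', 'my_init' and 'my_fini' are hypothetical names; only the
 * call shape follows the API above. evsel__new_idx() below then allocates
 * perf_evsel__object.size bytes and invokes the registered init handler.
 */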

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

int __evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * perf_record_sample.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}
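
/*
 * Example, derived from the checks above: for
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ID,
 * the id is the third u64 in the sample, so id_pos == 2; with
 * PERF_SAMPLE_IDENTIFIER set it is always the first one (0), and
 * without PERF_SAMPLE_ID/IDENTIFIER there is no id at all (-1).
 */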

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
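
/*
 * Example, derived from the checks above: with sample_id_all and
 * sample_type = PERF_SAMPLE_ID | PERF_SAMPLE_CPU, the id is the second
 * u64 counting back from the end of a non-sample event (is_pos == 2);
 * PERF_SAMPLE_IDENTIFIER pins it to the very last u64 (is_pos == 1).
 */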

void evsel__calc_id_pos(struct evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
}

void __evsel__set_sample_bit(struct evsel *evsel,
			     enum perf_event_sample_format bit)
{
	if (!(evsel->core.attr.sample_type & bit)) {
		evsel->core.attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		evsel__calc_id_pos(evsel);
	}
}

void __evsel__reset_sample_bit(struct evsel *evsel,
			       enum perf_event_sample_format bit)
{
	if (evsel->core.attr.sample_type & bit) {
		evsel->core.attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		evsel__calc_id_pos(evsel);
	}
}

void evsel__set_sample_id(struct evsel *evsel,
			  bool can_sample_identifier)
{
	if (can_sample_identifier) {
		evsel__reset_sample_bit(evsel, ID);
		evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		evsel__set_sample_bit(evsel, ID);
	}
	evsel->core.attr.read_format |= PERF_FORMAT_ID;
}

/**
 * evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool evsel__is_function_event(struct evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

void evsel__init(struct evsel *evsel,
		 struct perf_event_attr *attr, int idx)
{
	perf_evsel__init(&evsel->core, attr, idx);
	evsel->tracking	   = !idx;
	evsel->unit	   = strdup("");
	evsel->scale	   = 1.0;
	evsel->max_events  = ULONG_MAX;
	evsel->evlist	   = NULL;
	evsel->bpf_obj	   = NULL;
	evsel->bpf_fd	   = -1;
	INIT_LIST_HEAD(&evsel->config_terms);
	INIT_LIST_HEAD(&evsel->bpf_counter_list);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __evsel__sample_size(attr->sample_type);
	evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_expr   = NULL;
	evsel->metric_name   = NULL;
	evsel->metric_events = NULL;
	evsel->per_pkg_mask  = NULL;
	evsel->collect_stat  = false;
	evsel->pmu_name      = NULL;
}

struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);

	if (!evsel)
		return NULL;
	evsel__init(evsel, attr, idx);

	if (evsel__is_bpf_output(evsel)) {
		evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
						 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		evsel->core.attr.sample_period = 1;
	}

	if (evsel__is_clock(evsel)) {
		free((char *)evsel->unit);
		evsel->unit = strdup("msec");
		evsel->scale = 1e-6;
	}

	return evsel;
}

static bool perf_event_can_profile_kernel(void)
{
	return perf_event_paranoid_check(1);
}

struct evsel *evsel__new_cycles(bool precise __maybe_unused, __u32 type, __u64 config)
{
	struct perf_event_attr attr = {
		.type	= type,
		.config	= config,
		.exclude_kernel	= !perf_event_can_profile_kernel(),
	};
	struct evsel *evsel;

	event_attr_init(&attr);

	/*
	 * Now let the usual logic that sets up the perf_event_attr defaults
	 * kick in when we return, before perf_evsel__open() is called.
	 */
	evsel = evsel__new(&attr);
	if (evsel == NULL)
		goto out;

	arch_evsel__fixup_new_cycles(&evsel->core.attr);

	evsel->precise_max = true;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%s%s%.*s",
		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
		     attr.exclude_kernel ? "u" : "",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
		goto error_free;
out:
	return evsel;
error_free:
	evsel__delete(evsel);
	evsel = NULL;
	goto out;
}
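
/*
 * For reference, the asprintf() above produces names such as "cycles:u" when
 * only the kernel is excluded, or "cycles:upp" with exclude_kernel set and
 * precise_ip == 1 (precise_ip + 1 characters of "ppp" are appended).
 */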

int copy_config_terms(struct list_head *dst, struct list_head *src)
{
	struct evsel_config_term *pos, *tmp;

	list_for_each_entry(pos, src, list) {
		tmp = malloc(sizeof(*tmp));
		if (tmp == NULL)
			return -ENOMEM;

		*tmp = *pos;
		if (tmp->free_str) {
			tmp->val.str = strdup(pos->val.str);
			if (tmp->val.str == NULL) {
				free(tmp);
				return -ENOMEM;
			}
		}
		list_add_tail(&tmp->list, dst);
	}
	return 0;
}

static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
{
	return copy_config_terms(&dst->config_terms, &src->config_terms);
}

/**
 * evsel__clone - create a new evsel copied from @orig
 * @orig: original evsel
 *
 * The assumption is that @orig is not configured nor opened yet.
 * So we only care about the attributes that can be set while it's parsed.
 */
struct evsel *evsel__clone(struct evsel *orig)
{
	struct evsel *evsel;

	BUG_ON(orig->core.fd);
	BUG_ON(orig->counts);
	BUG_ON(orig->priv);
	BUG_ON(orig->per_pkg_mask);

	/* cannot handle BPF objects for now */
	if (orig->bpf_obj)
		return NULL;

	evsel = evsel__new(&orig->core.attr);
	if (evsel == NULL)
		return NULL;

	evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
	evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
	evsel->core.threads = perf_thread_map__get(orig->core.threads);
	evsel->core.nr_members = orig->core.nr_members;
	evsel->core.system_wide = orig->core.system_wide;

	if (orig->name) {
		evsel->name = strdup(orig->name);
		if (evsel->name == NULL)
			goto out_err;
	}
	if (orig->group_name) {
		evsel->group_name = strdup(orig->group_name);
		if (evsel->group_name == NULL)
			goto out_err;
	}
	if (orig->pmu_name) {
		evsel->pmu_name = strdup(orig->pmu_name);
		if (evsel->pmu_name == NULL)
			goto out_err;
	}
	if (orig->filter) {
		evsel->filter = strdup(orig->filter);
		if (evsel->filter == NULL)
			goto out_err;
	}
	if (orig->metric_id) {
		evsel->metric_id = strdup(orig->metric_id);
		if (evsel->metric_id == NULL)
			goto out_err;
	}
	evsel->cgrp = cgroup__get(orig->cgrp);
	evsel->tp_format = orig->tp_format;
	evsel->handler = orig->handler;
	evsel->core.leader = orig->core.leader;

	evsel->max_events = orig->max_events;
	evsel->tool_event = orig->tool_event;
	free((char *)evsel->unit);
	evsel->unit = strdup(orig->unit);
	if (evsel->unit == NULL)
		goto out_err;

	evsel->scale = orig->scale;
	evsel->snapshot = orig->snapshot;
	evsel->per_pkg = orig->per_pkg;
	evsel->percore = orig->percore;
	evsel->precise_max = orig->precise_max;
	evsel->use_uncore_alias = orig->use_uncore_alias;
	evsel->is_libpfm_event = orig->is_libpfm_event;

	evsel->exclude_GH = orig->exclude_GH;
	evsel->sample_read = orig->sample_read;
	evsel->auto_merge_stats = orig->auto_merge_stats;
	evsel->collect_stat = orig->collect_stat;
	evsel->weak_group = orig->weak_group;
	evsel->use_config_name = orig->use_config_name;

	if (evsel__copy_config_terms(evsel, orig) < 0)
		goto out_err;

	return evsel;

out_err:
	evsel__delete(evsel);
	return NULL;
}

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}

const char *evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

char *evsel__bpf_counter_events;

bool evsel__match_bpf_counter_events(const char *name)
{
	int name_len;
	bool match;
	char *ptr;

	if (!evsel__bpf_counter_events)
		return false;

	ptr = strstr(evsel__bpf_counter_events, name);
	name_len = strlen(name);

	/* check name matches a full token in evsel__bpf_counter_events */
	match = (ptr != NULL) &&
		((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) &&
		((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0'));

	return match;
}
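
/*
 * For example, with evsel__bpf_counter_events set to "cycles,instructions",
 * "cycles" and "instructions" match, but "cycle" does not: the match must
 * start at the beginning of a token and end right before ',' or '\0'.
 */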

static const char *__evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
		return evsel__hw_names[config];

	return "unknown-hardware";
}

static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->core.attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && evsel__sw_names[config])
		return evsel__sw_names[config];
	return "unknown-software";
}

static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config));
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	int r = __evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", evsel__hw_cache[type][0],
				 evsel__hw_cache_op[op][0],
				 evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", evsel__hw_cache[type][0],
			 evsel__hw_cache_op[op][1]);
}

static int __evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size);
	return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
	return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int evsel__tool_name(char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "duration_time");
	return ret;
}

const char *evsel__name(struct evsel *evsel)
{
	char bf[128];

	if (!evsel)
		goto out_unknown;

	if (evsel->name)
		return evsel->name;

	switch (evsel->core.attr.type) {
	case PERF_TYPE_RAW:
		evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		if (evsel->tool_event)
			evsel__tool_name(bf, sizeof(bf));
		else
			evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->core.attr.type);
		break;
	}

	evsel->name = strdup(bf);

	if (evsel->name)
		return evsel->name;
out_unknown:
	return "unknown";
}

const char *evsel__metric_id(const struct evsel *evsel)
{
	if (evsel->metric_id)
		return evsel->metric_id;

	if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && evsel->tool_event)
		return "duration_time";

	return "unknown";
}

const char *evsel__group_name(struct evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

/*
 * Returns the group details for the specified leader,
 * with following rules.
 *
 *  For record -e '{cycles,instructions}'
 *    'anon group { cycles:u, instructions:u }'
 *
 *  For record -e 'cycles,instructions' and report --group
 *    'cycles:u, instructions:u'
 */
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
{
	int ret = 0;
	struct evsel *pos;
	const char *group_name = evsel__group_name(evsel);

	if (!evsel->forced_leader)
		ret = scnprintf(buf, size, "%s { ", group_name);

	ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s", evsel__name(pos));

	if (!evsel->forced_leader)
		ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
				      struct callchain_param *param)
{
	bool function = evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->core.attr;

	evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (opts->kernel_callchains)
		attr->exclude_callchain_user = 1;
	if (opts->user_callchains)
		attr->exclude_callchain_kernel = 1;
	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS |
							PERF_SAMPLE_BRANCH_HW_INDEX;
			}
		} else
			 pr_warning("Cannot use LBR callstack with branch stack. "
				    "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			evsel__set_sample_bit(evsel, REGS_USER);
			evsel__set_sample_bit(evsel, STACK_USER);
			if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
				attr->sample_regs_user |= DWARF_MINIMAL_REGS;
				pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
					   "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
					   "so the minimal registers set (IP, SP) is explicitly forced.\n");
			} else {
				attr->sample_regs_user |= PERF_REGS_MASK;
			}
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
			     struct callchain_param *param)
{
	if (param->enabled)
		return __evsel__config_callchain(evsel, opts, param);
}

static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->core.attr;

	evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK |
					      PERF_SAMPLE_BRANCH_HW_INDEX);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		evsel__reset_sample_bit(evsel, REGS_USER);
		evsel__reset_sample_bit(evsel, STACK_USER);
	}
}

static void evsel__apply_config_terms(struct evsel *evsel,
				      struct record_opts *opts, bool track)
{
	struct evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->core.attr;
	/* callgraph default */
	struct callchain_param param = {
		.record_mode = callchain_param.record_mode,
	};
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case EVSEL__CONFIG_TERM_PERIOD:
			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
				attr->sample_period = term->val.period;
				attr->freq = 0;
				evsel__reset_sample_bit(evsel, PERIOD);
			}
			break;
		case EVSEL__CONFIG_TERM_FREQ:
			if (!(term->weak && opts->user_freq != UINT_MAX)) {
				attr->sample_freq = term->val.freq;
				attr->freq = 1;
				evsel__set_sample_bit(evsel, PERIOD);
			}
			break;
		case EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				evsel__set_sample_bit(evsel, TIME);
			else
				evsel__reset_sample_bit(evsel, TIME);
			break;
		case EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.str;
			break;
		case EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.str && strcmp(term->val.str, "no")) {
				evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.str,
						 &attr->branch_sample_type);
			} else
				evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case EVSEL__CONFIG_TERM_MAX_EVENTS:
			evsel->max_events = term->val.max_events;
			break;
		case EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * evsel__config. If the user explicitly set
			 * inherit using config terms, override the global
			 * opt->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_DRV_CFG:
			break;
		case EVSEL__CONFIG_TERM_PERCORE:
			break;
		case EVSEL__CONFIG_TERM_AUX_OUTPUT:
			attr->aux_output = term->val.aux_output ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
			/* Already applied by auxtrace */
			break;
		case EVSEL__CONFIG_TERM_CFG_CHG:
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		bool sample_address = false;

		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
				if (param.record_mode == CALLCHAIN_DWARF)
					sample_address = true;
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled) {
			if (sample_address) {
				evsel__set_sample_bit(evsel, ADDR);
				evsel__set_sample_bit(evsel, DATA_SRC);
				evsel->core.attr.mmap_data = track;
			}
			evsel__config_callchain(evsel, opts, &param);
		}
	}
}
1045
Arnaldo Carvalho de Melo35ac0ca2020-05-06 13:05:08 -03001046struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
Adrian Huntereb7a52d2019-11-15 14:42:17 +02001047{
Arnaldo Carvalho de Melo35ac0ca2020-05-06 13:05:08 -03001048 struct evsel_config_term *term, *found_term = NULL;
Adrian Huntereb7a52d2019-11-15 14:42:17 +02001049
1050 list_for_each_entry(term, &evsel->config_terms, list) {
1051 if (term->type == type)
1052 found_term = term;
1053 }
1054
1055 return found_term;
1056}
1057
Kan Liangea8d0ed2021-02-02 12:09:09 -08001058void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
1059{
1060 evsel__set_sample_bit(evsel, WEIGHT);
1061}
1062
Ravi Bangoriaeb39bf32021-11-02 11:01:12 +05301063void __weak arch_evsel__fixup_new_cycles(struct perf_event_attr *attr __maybe_unused)
1064{
1065}
1066
German Gomez3606c0e2022-01-18 14:40:54 +00001067static void evsel__set_default_freq_period(struct record_opts *opts,
1068 struct perf_event_attr *attr)
1069{
1070 if (opts->freq) {
1071 attr->freq = 1;
1072 attr->sample_freq = opts->freq;
1073 } else {
1074 attr->sample_period = opts->default_interval;
1075 }
1076}
1077
Jiri Olsa774cb492012-11-12 18:34:01 +01001078/*
1079 * The enable_on_exec/disabled value strategy:
1080 *
1081 * 1) For any type of traced program:
1082 * - all independent events and group leaders are disabled
1083 * - all group members are enabled
1084 *
1085 * Group members are ruled by group leaders. They need to
1086 * be enabled, because the group scheduling relies on that.
1087 *
1088 * 2) For traced programs executed by perf:
1089 * - all independent events and group leaders have
1090 * enable_on_exec set
1091 * - we don't specifically enable or disable any event during
1092 * the record command
1093 *
1094 * Independent events and group leaders are initially disabled
1095 * and get enabled by exec. Group members are ruled by group
1096 * leaders as stated in 1).
1097 *
1098 * 3) For traced programs attached by perf (pid/tid):
1099 * - we specifically enable or disable all events during
1100 * the record command
1101 *
1102 * When attaching events to already running traced we
1103 * enable/disable events specifically, as there's no
1104 * initial traced exec call.
1105 */
Arnaldo Carvalho de Melo6ec17b42020-04-29 15:57:01 -03001106void evsel__config(struct evsel *evsel, struct record_opts *opts,
1107 struct callchain_param *callchain)
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001108{
Jiri Olsafba7c862021-07-06 17:17:00 +02001109 struct evsel *leader = evsel__leader(evsel);
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001110 struct perf_event_attr *attr = &evsel->core.attr;
Adrian Hunter60b08962014-07-31 09:00:52 +03001111 int track = evsel->tracking;
Adrian Hunter3aa59392013-11-15 15:52:29 +02001112 bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001113
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001114 attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001115 attr->inherit = !opts->no_inherit;
Wang Nan626a6b72016-07-14 08:34:45 +00001116 attr->write_backward = opts->overwrite ? 1 : 0;
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001117
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001118 evsel__set_sample_bit(evsel, IP);
1119 evsel__set_sample_bit(evsel, TID);
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001120
Jiri Olsa3c176312012-10-10 17:39:03 +02001121 if (evsel->sample_read) {
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001122 evsel__set_sample_bit(evsel, READ);
Jiri Olsa3c176312012-10-10 17:39:03 +02001123
1124 /*
1125 * We need ID even in case of single event, because
1126 * PERF_SAMPLE_READ process ID specific data.
1127 */
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001128 evsel__set_sample_id(evsel, false);
Jiri Olsa3c176312012-10-10 17:39:03 +02001129
1130 /*
1131 * Apply group format only if we belong to group
1132 * with more than one members.
1133 */
Jiri Olsa5643b1a2019-07-21 13:24:46 +02001134 if (leader->core.nr_members > 1) {
Jiri Olsa3c176312012-10-10 17:39:03 +02001135 attr->read_format |= PERF_FORMAT_GROUP;
1136 attr->inherit = 0;
1137 }
1138 }
1139
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001140 /*
Namhyung Kim17314e22014-06-09 14:43:37 +09001141 * We default some events to have a default interval. But keep
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001142 * it a weak assumption overridable by the user.
1143 */
German Gomez3606c0e2022-01-18 14:40:54 +00001144 if ((evsel->is_libpfm_event && !attr->sample_period) ||
1145 (!evsel->is_libpfm_event && (!attr->sample_period ||
1146 opts->user_freq != UINT_MAX ||
1147 opts->user_interval != ULLONG_MAX)))
1148 evsel__set_default_freq_period(opts, attr);
1149
David Sharpce4326d2020-09-11 19:56:52 -07001150 /*
1151 * If attr->freq was set (here or earlier), ask for period
1152 * to be sampled.
1153 */
1154 if (attr->freq)
1155 evsel__set_sample_bit(evsel, PERIOD);
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001156
1157 if (opts->no_samples)
1158 attr->sample_freq = 0;
1159
Jiri Olsaa17f06972017-08-24 18:27:31 +02001160 if (opts->inherit_stat) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001161 evsel->core.attr.read_format |=
Jiri Olsaa17f06972017-08-24 18:27:31 +02001162 PERF_FORMAT_TOTAL_TIME_ENABLED |
1163 PERF_FORMAT_TOTAL_TIME_RUNNING |
1164 PERF_FORMAT_ID;
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001165 attr->inherit_stat = 1;
Jiri Olsaa17f06972017-08-24 18:27:31 +02001166 }
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001167
1168 if (opts->sample_address) {
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001169 evsel__set_sample_bit(evsel, ADDR);
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001170 attr->mmap_data = track;
1171 }
1172
Jiri Olsaf1403732014-11-13 18:21:03 +01001173 /*
 1174 * We don't allow user space callchains for the function trace
 1175 * event, due to issues with page faults while tracing the page
 1176 * fault handler and its overall trickiness.
1177 */
Arnaldo Carvalho de Meloc754c382020-04-30 10:51:16 -03001178 if (evsel__is_function_event(evsel))
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001179 evsel->core.attr.exclude_callchain_user = 1;
Jiri Olsaf1403732014-11-13 18:21:03 +01001180
Arnaldo Carvalho de Meloe68ae9c2016-04-11 18:15:29 -03001181 if (callchain && callchain->enabled && !evsel->no_aux_samples)
Arnaldo Carvalho de Melo6ec17b42020-04-29 15:57:01 -03001182 evsel__config_callchain(evsel, opts, callchain);
Jiri Olsa26d33022012-08-07 15:20:47 +02001183
Jin Yaoc4735d92020-07-20 09:00:13 +08001184 if (opts->sample_intr_regs && !evsel->no_aux_samples &&
1185 !evsel__is_dummy_event(evsel)) {
Stephane Eranianbcc84ec2015-08-31 18:41:12 +02001186 attr->sample_regs_intr = opts->sample_intr_regs;
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001187 evsel__set_sample_bit(evsel, REGS_INTR);
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02001188 }
1189
Jin Yaoc4735d92020-07-20 09:00:13 +08001190 if (opts->sample_user_regs && !evsel->no_aux_samples &&
1191 !evsel__is_dummy_event(evsel)) {
Andi Kleen84c41742017-09-05 10:00:28 -07001192 attr->sample_regs_user |= opts->sample_user_regs;
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001193 evsel__set_sample_bit(evsel, REGS_USER);
Andi Kleen84c41742017-09-05 10:00:28 -07001194 }
1195
Jiri Olsab6f35ed2016-08-01 20:02:35 +02001196 if (target__has_cpu(&opts->target) || opts->sample_cpu)
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001197 evsel__set_sample_bit(evsel, CPU);
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001198
Andi Kleen8affc2b2014-07-31 14:45:04 +08001199 /*
Adam Buchbinderbd1a0be52016-02-24 10:02:25 -08001200 * When the user has explicitly disabled time, don't force it here.
Andi Kleen8affc2b2014-07-31 14:45:04 +08001201 */
1202 if (opts->sample_time &&
1203 (!perf_missing_features.sample_id_all &&
Adrian Hunter3abebc52015-07-06 14:51:01 +03001204 (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
1205 opts->sample_time_set)))
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001206 evsel__set_sample_bit(evsel, TIME);
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001207
Adrian Hunter6ff1ce72014-07-14 13:02:56 +03001208 if (opts->raw_samples && !evsel->no_aux_samples) {
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001209 evsel__set_sample_bit(evsel, TIME);
1210 evsel__set_sample_bit(evsel, RAW);
1211 evsel__set_sample_bit(evsel, CPU);
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001212 }
1213
Stephane Eranianccf49bf2013-01-24 16:10:37 +01001214 if (opts->sample_address)
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001215 evsel__set_sample_bit(evsel, DATA_SRC);
Stephane Eranianccf49bf2013-01-24 16:10:37 +01001216
Kan Liang3b0a5da2017-08-29 13:11:08 -04001217 if (opts->sample_phys_addr)
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001218 evsel__set_sample_bit(evsel, PHYS_ADDR);
Kan Liang3b0a5da2017-08-29 13:11:08 -04001219
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001220 if (opts->no_buffering) {
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001221 attr->watermark = 0;
1222 attr->wakeup_events = 1;
1223 }
Adrian Hunter6ff1ce72014-07-14 13:02:56 +03001224 if (opts->branch_stack && !evsel->no_aux_samples) {
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001225 evsel__set_sample_bit(evsel, BRANCH_STACK);
Roberto Agostino Vitillobdfebd82012-02-09 23:21:02 +01001226 attr->branch_sample_type = opts->branch_stack;
1227 }
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001228
Andi Kleen05484292013-01-24 16:10:29 +01001229 if (opts->sample_weight)
Kan Liangea8d0ed2021-02-02 12:09:09 -08001230 arch_evsel__set_sample_weight(evsel);
Andi Kleen05484292013-01-24 16:10:29 +01001231
Jiri Olsae29386c2020-12-14 11:54:57 +01001232 attr->task = track;
1233 attr->mmap = track;
1234 attr->mmap2 = track && !perf_missing_features.mmap2;
1235 attr->comm = track;
1236 attr->build_id = track && opts->build_id;
1237
Adrian Hunter246eba82020-05-12 15:19:18 +03001238 /*
1239 * ksymbol is tracked separately with text poke because it needs to be
1240 * system wide and enabled immediately.
1241 */
1242 if (!opts->text_poke)
1243 attr->ksymbol = track && !perf_missing_features.ksymbol;
Arnaldo Carvalho de Melo74a1e862019-08-26 19:31:06 -03001244 attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf;
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001245
Hari Bathinif3b36142017-03-08 02:11:43 +05301246 if (opts->record_namespaces)
1247 attr->namespaces = track;
1248
Namhyung Kim8fb4b672020-03-25 21:45:34 +09001249 if (opts->record_cgroup) {
1250 attr->cgroup = track && !perf_missing_features.cgroup;
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001251 evsel__set_sample_bit(evsel, CGROUP);
Namhyung Kim8fb4b672020-03-25 21:45:34 +09001252 }
1253
Kan Liang542b88f2020-11-30 09:27:53 -08001254 if (opts->sample_data_page_size)
1255 evsel__set_sample_bit(evsel, DATA_PAGE_SIZE);
1256
Kan Liangc1de7f32021-01-05 11:57:49 -08001257 if (opts->sample_code_page_size)
1258 evsel__set_sample_bit(evsel, CODE_PAGE_SIZE);
1259
Adrian Hunterb757bb02015-07-21 12:44:04 +03001260 if (opts->record_switch_events)
1261 attr->context_switch = track;
1262
Andi Kleen475eeab2013-09-20 07:40:43 -07001263 if (opts->sample_transaction)
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001264 evsel__set_sample_bit(evsel, TRANSACTION);
Andi Kleen475eeab2013-09-20 07:40:43 -07001265
Andi Kleen85c273d2015-02-24 15:13:40 -08001266 if (opts->running_time) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001267 evsel->core.attr.read_format |=
Andi Kleen85c273d2015-02-24 15:13:40 -08001268 PERF_FORMAT_TOTAL_TIME_ENABLED |
1269 PERF_FORMAT_TOTAL_TIME_RUNNING;
1270 }
1271
Jiri Olsa774cb492012-11-12 18:34:01 +01001272 /*
1273 * XXX see the function comment above
1274 *
1275 * Disabling only independent events or group leaders,
1276 * keeping group members enabled.
1277 */
Arnaldo Carvalho de Meloc754c382020-04-30 10:51:16 -03001278 if (evsel__is_group_leader(evsel))
Jiri Olsa774cb492012-11-12 18:34:01 +01001279 attr->disabled = 1;
1280
1281 /*
 1282 * Setting enable_on_exec for independent events and
 1283 * group leaders for traced programs executed by perf.
1284 */
Arnaldo Carvalho de Meloc754c382020-04-30 10:51:16 -03001285 if (target__none(&opts->target) && evsel__is_group_leader(evsel) &&
1286 !opts->initial_delay)
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001287 attr->enable_on_exec = 1;
Adrian Hunter2afd2bc2014-07-14 13:02:57 +03001288
1289 if (evsel->immediate) {
1290 attr->disabled = 0;
1291 attr->enable_on_exec = 0;
1292 }
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001293
1294 clockid = opts->clockid;
1295 if (opts->use_clockid) {
1296 attr->use_clockid = 1;
1297 attr->clockid = opts->clockid;
1298 }
Jiri Olsa930a2e22015-07-29 05:42:10 -04001299
Jiri Olsa7f94af72015-10-05 20:06:05 +02001300 if (evsel->precise_max)
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001301 attr->precise_ip = 3;
Jiri Olsa7f94af72015-10-05 20:06:05 +02001302
Jiri Olsa85723882016-02-15 09:34:31 +01001303 if (opts->all_user) {
1304 attr->exclude_kernel = 1;
1305 attr->exclude_user = 0;
1306 }
1307
1308 if (opts->all_kernel) {
1309 attr->exclude_kernel = 0;
1310 attr->exclude_user = 1;
1311 }
1312
Jiri Olsafe1f61b2019-07-21 13:24:38 +02001313 if (evsel->core.own_cpus || evsel->unit)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001314 evsel->core.attr.read_format |= PERF_FORMAT_ID;
Jiri Olsa4ab84552018-10-03 23:20:52 +02001315
Jiri Olsa930a2e22015-07-29 05:42:10 -04001316 /*
1317 * Apply event specific term settings,
1318 * it overloads any global configuration.
1319 */
Arnaldo Carvalho de Melo35ac0ca2020-05-06 13:05:08 -03001320 evsel__apply_config_terms(evsel, opts, track);
Jiri Olsaa359c172016-12-13 08:46:22 +01001321
1322 evsel->ignore_missing_thread = opts->ignore_missing_thread;
Jiri Olsaf290aa12018-02-01 09:38:11 +01001323
1324 /* The --period option takes the precedence. */
1325 if (opts->period_set) {
1326 if (opts->period)
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001327 evsel__set_sample_bit(evsel, PERIOD);
Jiri Olsaf290aa12018-02-01 09:38:11 +01001328 else
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001329 evsel__reset_sample_bit(evsel, PERIOD);
Jiri Olsaf290aa12018-02-01 09:38:11 +01001330 }
Kan Liang95035c52018-07-09 07:15:22 -07001331
1332 /*
Ian Rogers5885a202020-05-08 14:08:03 -03001333 * A dummy event never triggers any actual counter and therefore
1334 * cannot be used with branch_stack.
1335 *
Kan Liang95035c52018-07-09 07:15:22 -07001336 * For initial_delay, a dummy event is added implicitly.
 1337 * The software event will error out with -EOPNOTSUPP
 1338 * if the BRANCH_STACK bit is set.
1339 */
Adrian Hunter442ad2252020-06-29 12:19:51 +03001340 if (evsel__is_dummy_event(evsel))
Arnaldo Carvalho de Melo862b2f82020-04-29 16:12:15 -03001341 evsel__reset_sample_bit(evsel, BRANCH_STACK);
Arnaldo Carvalho de Melo0f82ebc2011-11-08 14:41:57 -02001342}
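
/*
 * Illustrative sketch (not part of evsel.c): how case 2) of the strategy
 * documented above maps onto raw perf_event_attr bits when perf forks and
 * execs the workload itself.  It uses only the documented
 * perf_event_open(2) ABI; the child pid, event choice and error handling
 * are placeholders.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static int open_leader_for_exec_workload_example(pid_t workload_pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;		/* leader starts disabled ... */
	attr.enable_on_exec = 1;	/* ... and is armed by the workload's exec */

	return syscall(SYS_perf_event_open, &attr, workload_pid, -1, -1, 0);
}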
1343
Arnaldo Carvalho de Meload681ad2020-04-29 16:19:05 -03001344int evsel__set_filter(struct evsel *evsel, const char *filter)
Arnaldo Carvalho de Melo12467ae2015-07-03 17:05:50 -03001345{
1346 char *new_filter = strdup(filter);
1347
1348 if (new_filter != NULL) {
1349 free(evsel->filter);
1350 evsel->filter = new_filter;
1351 return 0;
1352 }
1353
1354 return -1;
1355}
1356
Arnaldo Carvalho de Meload681ad2020-04-29 16:19:05 -03001357static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter)
Arnaldo Carvalho de Melo64ec84f2015-07-04 12:19:13 -03001358{
1359 char *new_filter;
1360
1361 if (evsel->filter == NULL)
Arnaldo Carvalho de Meload681ad2020-04-29 16:19:05 -03001362 return evsel__set_filter(evsel, filter);
Arnaldo Carvalho de Melo64ec84f2015-07-04 12:19:13 -03001363
Mathieu Poirierb15d0a42016-09-16 08:44:03 -06001364 if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
Arnaldo Carvalho de Melo64ec84f2015-07-04 12:19:13 -03001365 free(evsel->filter);
1366 evsel->filter = new_filter;
1367 return 0;
1368 }
1369
1370 return -1;
1371}
1372
Arnaldo Carvalho de Meload681ad2020-04-29 16:19:05 -03001373int evsel__append_tp_filter(struct evsel *evsel, const char *filter)
Mathieu Poirier3541c032016-09-16 08:44:04 -06001374{
Arnaldo Carvalho de Meload681ad2020-04-29 16:19:05 -03001375 return evsel__append_filter(evsel, "(%s) && (%s)", filter);
Mathieu Poirier3541c032016-09-16 08:44:04 -06001376}
1377
Arnaldo Carvalho de Meload681ad2020-04-29 16:19:05 -03001378int evsel__append_addr_filter(struct evsel *evsel, const char *filter)
Mathieu Poirier1e857482016-09-16 08:44:05 -06001379{
Arnaldo Carvalho de Meload681ad2020-04-29 16:19:05 -03001380 return evsel__append_filter(evsel, "%s,%s", filter);
Mathieu Poirier1e857482016-09-16 08:44:05 -06001381}
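
/*
 * Illustrative sketch: what the "(%s) && (%s)" format used by
 * evsel__append_tp_filter() produces.  Standalone example (assumes
 * _GNU_SOURCE for asprintf()); the filter strings are made up.
 */
#include <stdio.h>
#include <stdlib.h>

static char *combine_tp_filters_example(void)
{
	char *combined = NULL;

	/* yields: (common_pid != 1234) && (prev_comm ~ "perf*") */
	if (asprintf(&combined, "(%s) && (%s)",
		     "common_pid != 1234", "prev_comm ~ \"perf*\"") < 0)
		return NULL;

	return combined;	/* caller frees, mirroring evsel->filter ownership */
}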
1382
Andi Kleen363fb122019-11-20 16:15:21 -08001383/* Caller has to clear disabled after going through all CPUs. */
Ian Rogers6f844b12022-01-04 22:13:42 -08001384int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx)
Andi Kleen363fb122019-11-20 16:15:21 -08001385{
Ian Rogers6f844b12022-01-04 22:13:42 -08001386 return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
Andi Kleen363fb122019-11-20 16:15:21 -08001387}
1388
Jiri Olsaec7f24e2019-07-21 13:24:02 +02001389int evsel__enable(struct evsel *evsel)
Andi Kleene2407be2013-08-02 17:41:10 -07001390{
Jiri Olsaa00571f2019-07-21 13:24:52 +02001391 int err = perf_evsel__enable(&evsel->core);
Arnaldo Carvalho de Melob7e84522018-10-20 09:04:41 -03001392
1393 if (!err)
1394 evsel->disabled = false;
Arnaldo Carvalho de Melob7e84522018-10-20 09:04:41 -03001395 return err;
Andi Kleene2407be2013-08-02 17:41:10 -07001396}
1397
Andi Kleen363fb122019-11-20 16:15:21 -08001398/* Caller has to set disabled after going through all CPUs. */
Ian Rogers6f844b12022-01-04 22:13:42 -08001399int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx)
Andi Kleen363fb122019-11-20 16:15:21 -08001400{
Ian Rogers6f844b12022-01-04 22:13:42 -08001401 return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx);
Andi Kleen363fb122019-11-20 16:15:21 -08001402}
1403
Jiri Olsa9a10bb22019-07-21 13:24:03 +02001404int evsel__disable(struct evsel *evsel)
Jiri Olsae98a4cb2015-12-03 10:06:41 +01001405{
Jiri Olsaa00571f2019-07-21 13:24:52 +02001406 int err = perf_evsel__disable(&evsel->core);
Arnaldo Carvalho de Melob7e84522018-10-20 09:04:41 -03001407 /*
 1408 * We mark it disabled here so that tools that disable an event can
 1409 * ignore events after they disable it. I.e. the ring buffer may already
 1410 * have a few more events queued up before the kernel got the stop
1411 * request.
1412 */
1413 if (!err)
1414 evsel->disabled = true;
1415
1416 return err;
Jiri Olsae98a4cb2015-12-03 10:06:41 +01001417}
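
/*
 * Illustrative sketch: the per-fd operation that evsel__enable() and
 * evsel__disable() ultimately issue through libperf, shown against the
 * documented perf_event_open(2) ioctls.  A standalone helper, not the
 * libperf implementation (which loops over every CPU/thread fd).
 */
#include <linux/perf_event.h>
#include <stdbool.h>
#include <sys/ioctl.h>

static int toggle_perf_fd_example(int perf_fd, bool enable)
{
	return ioctl(perf_fd, enable ? PERF_EVENT_IOC_ENABLE : PERF_EVENT_IOC_DISABLE, 0);
}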
1418
Adrian Huntera7d212f2021-09-09 15:55:07 +03001419void free_config_terms(struct list_head *config_terms)
Jiri Olsa930a2e22015-07-29 05:42:10 -04001420{
Arnaldo Carvalho de Melo35ac0ca2020-05-06 13:05:08 -03001421 struct evsel_config_term *term, *h;
Jiri Olsa930a2e22015-07-29 05:42:10 -04001422
Adrian Huntera7d212f2021-09-09 15:55:07 +03001423 list_for_each_entry_safe(term, h, config_terms, list) {
Arnaldo Carvalho de Meloe56fbc92019-07-04 12:13:46 -03001424 list_del_init(&term->list);
Leo Yan3220fb82020-01-17 13:52:51 +08001425 if (term->free_str)
1426 zfree(&term->val.str);
Jiri Olsa930a2e22015-07-29 05:42:10 -04001427 free(term);
1428 }
1429}
1430
Adrian Huntera7d212f2021-09-09 15:55:07 +03001431static void evsel__free_config_terms(struct evsel *evsel)
1432{
1433 free_config_terms(&evsel->config_terms);
1434}
1435
Arnaldo Carvalho de Melo30f7c592020-04-29 15:53:17 -03001436void evsel__exit(struct evsel *evsel)
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001437{
Jiri Olsab27c4ec2019-07-21 13:24:22 +02001438 assert(list_empty(&evsel->core.node));
Arnaldo Carvalho de Melod49e4692015-08-27 08:07:40 -04001439 assert(evsel->evlist == NULL);
Song Liufa853c42020-12-29 13:42:14 -08001440 bpf_counter__destroy(evsel);
Arnaldo Carvalho de Melo7d1e2392020-05-06 13:38:26 -03001441 evsel__free_counts(evsel);
Jiri Olsa88761fa2019-07-21 13:24:50 +02001442 perf_evsel__free_fd(&evsel->core);
Jiri Olsa70c20362019-09-03 10:34:29 +02001443 perf_evsel__free_id(&evsel->core);
Arnaldo Carvalho de Melo35ac0ca2020-05-06 13:05:08 -03001444 evsel__free_config_terms(evsel);
Arnaldo Carvalho de Meloa53b6462018-03-06 10:10:45 -03001445 cgroup__put(evsel->cgrp);
Jiri Olsad400bd32019-07-21 13:24:37 +02001446 perf_cpu_map__put(evsel->core.cpus);
Jiri Olsafe1f61b2019-07-21 13:24:38 +02001447 perf_cpu_map__put(evsel->core.own_cpus);
Jiri Olsaaf663bd2019-07-21 13:24:39 +02001448 perf_thread_map__put(evsel->core.threads);
Arnaldo Carvalho de Melo597e48c2014-10-16 13:25:01 -03001449 zfree(&evsel->group_name);
Arnaldo Carvalho de Melo597e48c2014-10-16 13:25:01 -03001450 zfree(&evsel->name);
Ian Rogersd4953f72020-03-14 10:03:56 -07001451 zfree(&evsel->pmu_name);
Ian Rogersb194c9c2021-11-18 00:47:49 -08001452 zfree(&evsel->unit);
Ian Rogers2b62b3a2021-10-15 10:21:25 -07001453 zfree(&evsel->metric_id);
Jin Yao034f7ee2021-01-28 09:34:17 +08001454 evsel__zero_per_pkg(evsel);
1455 hashmap__free(evsel->per_pkg_mask);
1456 evsel->per_pkg_mask = NULL;
Ian Rogers3efc8992020-05-12 16:59:18 -07001457 zfree(&evsel->metric_events);
Arnaldo Carvalho de Meloce8ccff2014-10-09 15:29:51 -03001458 perf_evsel__object.fini(evsel);
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -02001459}
1460
Jiri Olsa5eb2dd22019-07-21 13:23:57 +02001461void evsel__delete(struct evsel *evsel)
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -02001462{
Arnaldo Carvalho de Melo30f7c592020-04-29 15:53:17 -03001463 evsel__exit(evsel);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001464 free(evsel);
1465}
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -02001466
Ian Rogers6f844b12022-01-04 22:13:42 -08001467void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
Arnaldo Carvalho de Melo12f52612020-04-29 15:47:38 -03001468 struct perf_counts_values *count)
Stephane Eranianc7a79c42013-01-29 12:47:43 +01001469{
1470 struct perf_counts_values tmp;
1471
1472 if (!evsel->prev_raw_counts)
1473 return;
1474
Ian Rogers6f844b12022-01-04 22:13:42 -08001475 if (cpu_map_idx == -1) {
Stephane Eranianc7a79c42013-01-29 12:47:43 +01001476 tmp = evsel->prev_raw_counts->aggr;
1477 evsel->prev_raw_counts->aggr = *count;
1478 } else {
Ian Rogers6f844b12022-01-04 22:13:42 -08001479 tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
1480 *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
Stephane Eranianc7a79c42013-01-29 12:47:43 +01001481 }
1482
1483 count->val = count->val - tmp.val;
1484 count->ena = count->ena - tmp.ena;
1485 count->run = count->run - tmp.run;
1486}
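
/*
 * Illustrative sketch: the delta computation above with concrete numbers.
 * If the previous read saw val/ena/run = 1000/500/500 and the current read
 * returns 1800/900/800, the values reported for this interval are
 * 800/400/300.  Uses the same struct perf_counts_values fields as the
 * function above; purely an example, never called.
 */
static void compute_deltas_example(void)
{
	struct perf_counts_values prev = { .val = 1000, .ena = 500, .run = 500 };
	struct perf_counts_values curr = { .val = 1800, .ena = 900, .run = 800 };

	curr.val -= prev.val;	/* 800 events counted in this interval */
	curr.ena -= prev.ena;	/* enabled for 400 more time units */
	curr.run -= prev.run;	/* actually on the PMU for 300 of them */
}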
1487
Ian Rogersda8c94c2022-01-04 22:13:39 -08001488static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
Jiri Olsa13112bb2014-11-21 10:31:06 +01001489{
Ian Rogersda8c94c2022-01-04 22:13:39 -08001490 struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);
Jiri Olsa13112bb2014-11-21 10:31:06 +01001491
Ian Rogersda8c94c2022-01-04 22:13:39 -08001492 return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
Jiri Olsa13112bb2014-11-21 10:31:06 +01001493}
1494
Ian Rogers6f844b12022-01-04 22:13:42 -08001495static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
1496 u64 val, u64 ena, u64 run)
Jiri Olsaf7794d52017-07-26 14:02:05 +02001497{
1498 struct perf_counts_values *count;
1499
Ian Rogers6f844b12022-01-04 22:13:42 -08001500 count = perf_counts(counter->counts, cpu_map_idx, thread);
Jiri Olsaf7794d52017-07-26 14:02:05 +02001501
1502 count->val = val;
1503 count->ena = ena;
1504 count->run = run;
Jiri Olsadf1d6852019-07-21 13:23:48 +02001505
Ian Rogers6f844b12022-01-04 22:13:42 -08001506 perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
Jiri Olsaf7794d52017-07-26 14:02:05 +02001507}
1508
Ian Rogers6f844b12022-01-04 22:13:42 -08001509static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
Jiri Olsaf7794d52017-07-26 14:02:05 +02001510{
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001511 u64 read_format = leader->core.attr.read_format;
Jiri Olsaf7794d52017-07-26 14:02:05 +02001512 struct sample_read_value *v;
1513 u64 nr, ena = 0, run = 0, i;
1514
1515 nr = *data++;
1516
Jiri Olsa5643b1a2019-07-21 13:24:46 +02001517 if (nr != (u64) leader->core.nr_members)
Jiri Olsaf7794d52017-07-26 14:02:05 +02001518 return -EINVAL;
1519
1520 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1521 ena = *data++;
1522
1523 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1524 run = *data++;
1525
1526 v = (struct sample_read_value *) data;
1527
Ian Rogers6f844b12022-01-04 22:13:42 -08001528 evsel__set_count(leader, cpu_map_idx, thread, v[0].value, ena, run);
Jiri Olsaf7794d52017-07-26 14:02:05 +02001529
1530 for (i = 1; i < nr; i++) {
Jiri Olsa32dcd022019-07-21 13:23:51 +02001531 struct evsel *counter;
Jiri Olsaf7794d52017-07-26 14:02:05 +02001532
Arnaldo Carvalho de Melo3ccf8a72020-11-30 14:17:57 -03001533 counter = evlist__id2evsel(leader->evlist, v[i].id);
Jiri Olsaf7794d52017-07-26 14:02:05 +02001534 if (!counter)
1535 return -EINVAL;
1536
Ian Rogers6f844b12022-01-04 22:13:42 -08001537 evsel__set_count(counter, cpu_map_idx, thread, v[i].value, ena, run);
Jiri Olsaf7794d52017-07-26 14:02:05 +02001538 }
1539
1540 return 0;
1541}
1542
Ian Rogersda8c94c2022-01-04 22:13:39 -08001543static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
Jiri Olsaf7794d52017-07-26 14:02:05 +02001544{
Arnaldo Carvalho de Melo82806c32017-11-09 12:03:40 -03001545 struct perf_stat_evsel *ps = leader->stats;
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001546 u64 read_format = leader->core.attr.read_format;
Jiri Olsa5c30af92019-07-21 13:24:51 +02001547 int size = perf_evsel__read_size(&leader->core);
Jiri Olsaf7794d52017-07-26 14:02:05 +02001548 u64 *data = ps->group_data;
1549
1550 if (!(read_format & PERF_FORMAT_ID))
1551 return -EINVAL;
1552
Arnaldo Carvalho de Meloc754c382020-04-30 10:51:16 -03001553 if (!evsel__is_group_leader(leader))
Jiri Olsaf7794d52017-07-26 14:02:05 +02001554 return -EINVAL;
1555
1556 if (!data) {
1557 data = zalloc(size);
1558 if (!data)
1559 return -ENOMEM;
1560
1561 ps->group_data = data;
1562 }
1563
Ian Rogersda8c94c2022-01-04 22:13:39 -08001564 if (FD(leader, cpu_map_idx, thread) < 0)
Jiri Olsaf7794d52017-07-26 14:02:05 +02001565 return -EINVAL;
1566
Ian Rogersda8c94c2022-01-04 22:13:39 -08001567 if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0)
Jiri Olsaf7794d52017-07-26 14:02:05 +02001568 return -errno;
1569
Ian Rogersda8c94c2022-01-04 22:13:39 -08001570 return evsel__process_group_data(leader, cpu_map_idx, thread, data);
Jiri Olsaf7794d52017-07-26 14:02:05 +02001571}
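
/*
 * Illustrative sketch of the buffer evsel__read_group() parses.  With
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING, a
 * read() on the group leader returns the layout documented in
 * perf_event_open(2); the struct names below are made up for illustration.
 */
struct group_read_entry_example {
	__u64 value;				/* counter value */
	__u64 id;				/* matched back via evlist__id2evsel() */
};

struct group_read_buffer_example {
	__u64 nr;				/* number of events in the group */
	__u64 time_enabled;			/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	__u64 time_running;			/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	struct group_read_entry_example cntr[];	/* one entry per group member */
};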
1572
Ian Rogersda8c94c2022-01-04 22:13:39 -08001573int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
Jiri Olsaf7794d52017-07-26 14:02:05 +02001574{
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001575 u64 read_format = evsel->core.attr.read_format;
Jiri Olsaf7794d52017-07-26 14:02:05 +02001576
1577 if (read_format & PERF_FORMAT_GROUP)
Ian Rogersda8c94c2022-01-04 22:13:39 -08001578 return evsel__read_group(evsel, cpu_map_idx, thread);
Arnaldo Carvalho de Meloea089692020-04-30 11:00:53 -03001579
Ian Rogersda8c94c2022-01-04 22:13:39 -08001580 return evsel__read_one(evsel, cpu_map_idx, thread);
Jiri Olsaf7794d52017-07-26 14:02:05 +02001581}
1582
Ian Rogersda8c94c2022-01-04 22:13:39 -08001583int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale)
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -02001584{
1585 struct perf_counts_values count;
1586 size_t nv = scale ? 3 : 1;
1587
Ian Rogersda8c94c2022-01-04 22:13:39 -08001588 if (FD(evsel, cpu_map_idx, thread) < 0)
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -02001589 return -EINVAL;
1590
Ian Rogers2ca0a372022-01-04 22:13:29 -08001591 if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0)
Arnaldo Carvalho de Melo4eed11d2011-01-04 00:13:17 -02001592 return -ENOMEM;
1593
Ian Rogersda8c94c2022-01-04 22:13:39 -08001594 if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0)
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -02001595 return -errno;
1596
Ian Rogersda8c94c2022-01-04 22:13:39 -08001597 evsel__compute_deltas(evsel, cpu_map_idx, thread, &count);
Jiri Olsa13112bb2014-11-21 10:31:06 +01001598 perf_counts_values__scale(&count, scale, NULL);
Ian Rogersda8c94c2022-01-04 22:13:39 -08001599 *perf_counts(evsel->counts, cpu_map_idx, thread) = count;
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -02001600 return 0;
1601}
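
/*
 * Illustrative sketch: roughly the extrapolation perf_counts_values__scale()
 * applies when 'scale' is true.  If the event was multiplexed and only ran
 * for part of the time it was enabled, the raw count is scaled up by
 * ena/run.  Hypothetical standalone helper, not the libperf function.
 */
static u64 scale_multiplexed_count_example(u64 val, u64 ena, u64 run)
{
	if (run == 0)
		return 0;		/* the event never got onto the PMU */
	if (run >= ena)
		return val;		/* ran the whole time, nothing to scale */

	return (u64)((double)val * ena / run);
}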
1602
Jin Yao1fcc57b2021-06-09 12:45:55 +08001603static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other,
Ian Rogers2daa08c2022-01-04 22:13:40 -08001604 int cpu_map_idx)
Jin Yao1fcc57b2021-06-09 12:45:55 +08001605{
Ian Rogers6d188042022-01-04 22:13:51 -08001606 struct perf_cpu cpu;
Jin Yao1fcc57b2021-06-09 12:45:55 +08001607
Ian Rogers2daa08c2022-01-04 22:13:40 -08001608 cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
1609 return perf_cpu_map__idx(other->core.cpus, cpu);
Jin Yao1fcc57b2021-06-09 12:45:55 +08001610}
1611
Ian Rogers2daa08c2022-01-04 22:13:40 -08001612static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx)
Jin Yao1fcc57b2021-06-09 12:45:55 +08001613{
Jiri Olsafba7c862021-07-06 17:17:00 +02001614 struct evsel *leader = evsel__leader(evsel);
Jin Yao1fcc57b2021-06-09 12:45:55 +08001615
1616 if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) ||
1617 (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) {
Ian Rogers2daa08c2022-01-04 22:13:40 -08001618 return evsel__match_other_cpu(evsel, leader, cpu_map_idx);
Jin Yao1fcc57b2021-06-09 12:45:55 +08001619 }
1620
Ian Rogers2daa08c2022-01-04 22:13:40 -08001621 return cpu_map_idx;
Jin Yao1fcc57b2021-06-09 12:45:55 +08001622}
1623
Ian Rogers2daa08c2022-01-04 22:13:40 -08001624static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001625{
Jiri Olsafba7c862021-07-06 17:17:00 +02001626 struct evsel *leader = evsel__leader(evsel);
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001627 int fd;
1628
Arnaldo Carvalho de Meloc754c382020-04-30 10:51:16 -03001629 if (evsel__is_group_leader(evsel))
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001630 return -1;
1631
1632 /*
 1633 * The leader must already be processed/open;
 1634 * if not, it's a bug.
1635 */
Jiri Olsa9dfcb752019-07-21 13:24:45 +02001636 BUG_ON(!leader->core.fd);
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001637
Ian Rogers2daa08c2022-01-04 22:13:40 -08001638 cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx);
1639 if (cpu_map_idx == -1)
Jin Yao1fcc57b2021-06-09 12:45:55 +08001640 return -1;
1641
Ian Rogers2daa08c2022-01-04 22:13:40 -08001642 fd = FD(leader, cpu_map_idx, thread);
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001643 BUG_ON(fd == -1);
1644
1645 return fd;
1646}
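
/*
 * Illustrative sketch of how the fd returned by get_group_fd() is consumed,
 * per the documented perf_event_open(2) ABI.  The group leader is opened
 * first with group_fd == -1; every other member passes the leader's fd so
 * the kernel schedules the whole group as a unit.  Standalone helper with
 * placeholder arguments, not evsel.c code.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int open_group_member_example(struct perf_event_attr *attr, pid_t pid,
				     int cpu, int leader_fd)
{
	/* leader_fd == -1 creates a new group; otherwise join the leader's group */
	return syscall(SYS_perf_event_open, attr, pid, cpu, leader_fd, 0);
}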
1647
Arnaldo Carvalho de Melo56933022020-11-30 09:08:24 -03001648static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)
Mengting Zhangca800062017-12-13 15:01:53 +08001649{
1650 for (int cpu = 0; cpu < nr_cpus; cpu++)
1651 for (int thread = thread_idx; thread < nr_threads - 1; thread++)
1652 FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
1653}
1654
Jiri Olsa32dcd022019-07-21 13:23:51 +02001655static int update_fds(struct evsel *evsel,
Ian Rogers6f844b12022-01-04 22:13:42 -08001656 int nr_cpus, int cpu_map_idx,
Mengting Zhangca800062017-12-13 15:01:53 +08001657 int nr_threads, int thread_idx)
1658{
Jiri Olsa32dcd022019-07-21 13:23:51 +02001659 struct evsel *pos;
Mengting Zhangca800062017-12-13 15:01:53 +08001660
Ian Rogers6f844b12022-01-04 22:13:42 -08001661 if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)
Mengting Zhangca800062017-12-13 15:01:53 +08001662 return -EINVAL;
1663
1664 evlist__for_each_entry(evsel->evlist, pos) {
Ian Rogers6f844b12022-01-04 22:13:42 -08001665 nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx;
Mengting Zhangca800062017-12-13 15:01:53 +08001666
Arnaldo Carvalho de Melo56933022020-11-30 09:08:24 -03001667 evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
Mengting Zhangca800062017-12-13 15:01:53 +08001668
1669 /*
 1670 * Since the fds for the next evsel have not been created,
 1671 * there is no need to iterate over the whole event list.
1672 */
1673 if (pos == evsel)
1674 break;
1675 }
1676 return 0;
1677}
1678
Ian Rogers1fa497d2022-01-04 22:13:41 -08001679static bool evsel__ignore_missing_thread(struct evsel *evsel,
Ian Rogers6f844b12022-01-04 22:13:42 -08001680 int nr_cpus, int cpu_map_idx,
Ian Rogers1fa497d2022-01-04 22:13:41 -08001681 struct perf_thread_map *threads,
1682 int thread, int err)
Jiri Olsaa359c172016-12-13 08:46:22 +01001683{
Jiri Olsaa2f354e2019-08-22 13:11:41 +02001684 pid_t ignore_pid = perf_thread_map__pid(threads, thread);
Mengting Zhangca800062017-12-13 15:01:53 +08001685
Jiri Olsaa359c172016-12-13 08:46:22 +01001686 if (!evsel->ignore_missing_thread)
1687 return false;
1688
1689 /* The system wide setup does not work with threads. */
Jiri Olsa648b5af2019-08-06 11:35:19 +02001690 if (evsel->core.system_wide)
Jiri Olsaa359c172016-12-13 08:46:22 +01001691 return false;
1692
 1693 /* The -ESRCH is the perf event syscall errno for pids not found. */
1694 if (err != -ESRCH)
1695 return false;
1696
1697 /* If there's only one thread, let it fail. */
1698 if (threads->nr == 1)
1699 return false;
1700
Mengting Zhangca800062017-12-13 15:01:53 +08001701 /*
 1702 * We should remove the fd for the missing thread first
1703 * because thread_map__remove() will decrease threads->nr.
1704 */
Ian Rogers6f844b12022-01-04 22:13:42 -08001705 if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
Mengting Zhangca800062017-12-13 15:01:53 +08001706 return false;
1707
Jiri Olsaa359c172016-12-13 08:46:22 +01001708 if (thread_map__remove(threads, thread))
1709 return false;
1710
1711 pr_warning("WARNING: Ignored open failure for pid %d\n",
Mengting Zhangca800062017-12-13 15:01:53 +08001712 ignore_pid);
Jiri Olsaa359c172016-12-13 08:46:22 +01001713 return true;
1714}
1715
Arnaldo Carvalho de Meloca125272019-09-24 15:41:51 -03001716static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
1717 void *priv __maybe_unused)
1718{
1719 return fprintf(fp, " %-32s %s\n", name, val);
1720}
1721
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001722static void display_attr(struct perf_event_attr *attr)
1723{
Ravi Bangoriaccd26742019-11-08 15:11:28 +05301724 if (verbose >= 2 || debug_peo_args) {
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001725 fprintf(stderr, "%.60s\n", graph_dotted_line);
1726 fprintf(stderr, "perf_event_attr:\n");
1727 perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
1728 fprintf(stderr, "%.60s\n", graph_dotted_line);
1729 }
1730}
1731
Riccardo Mancini28667a52021-08-21 11:19:30 +02001732bool evsel__precise_ip_fallback(struct evsel *evsel)
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001733{
Riccardo Mancini28667a52021-08-21 11:19:30 +02001734 /* Do not try less precise if not requested. */
1735 if (!evsel->precise_max)
1736 return false;
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001737
Riccardo Mancini28667a52021-08-21 11:19:30 +02001738 /*
1739 * We tried all the precise_ip values, and it's
1740 * still failing, so leave it to standard fallback.
1741 */
1742 if (!evsel->core.attr.precise_ip) {
1743 evsel->core.attr.precise_ip = evsel->precise_ip_original;
1744 return false;
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001745 }
1746
Riccardo Mancini28667a52021-08-21 11:19:30 +02001747 if (!evsel->precise_ip_original)
1748 evsel->precise_ip_original = evsel->core.attr.precise_ip;
Jiri Olsa4e8a5c12019-03-14 15:00:10 +01001749
Riccardo Mancini28667a52021-08-21 11:19:30 +02001750 evsel->core.attr.precise_ip--;
1751 pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
1752 display_attr(&evsel->core.attr);
1753 return true;
1754}
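
/*
 * Illustrative sketch: the retry pattern evsel__precise_ip_fallback()
 * enables, written as a hypothetical standalone opener.  Start from the
 * most precise sampling mode and back off until the kernel/PMU accepts the
 * attribute; open_one() is a placeholder for the real open call.
 */
static int open_with_precise_ip_fallback_example(struct perf_event_attr *attr,
						 int (*open_one)(struct perf_event_attr *attr))
{
	int fd;

	attr->precise_ip = 3;			/* what evsel->precise_max asks for */
	while ((fd = open_one(attr)) < 0 && attr->precise_ip > 0)
		attr->precise_ip--;		/* retry with one less precise mode */

	return fd;
}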
Riccardo Mancinid45ce0342021-08-21 11:19:21 +02001755
1756static struct perf_cpu_map *empty_cpu_map;
1757static struct perf_thread_map *empty_thread_map;
1758
1759static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
1760 struct perf_thread_map *threads)
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02001761{
Riccardo Mancinid45ce0342021-08-21 11:19:21 +02001762 int nthreads;
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02001763
Arnaldo Carvalho de Meloacb9f2d2019-08-13 11:06:38 -03001764 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
1765 (perf_missing_features.aux_output && evsel->core.attr.aux_output))
Arnaldo Carvalho de Melo32a951b2016-07-14 08:34:33 +00001766 return -EINVAL;
1767
Arnaldo Carvalho de Meloc24ae6d2017-02-14 10:59:04 -03001768 if (cpus == NULL) {
Arnaldo Carvalho de Meloc24ae6d2017-02-14 10:59:04 -03001769 if (empty_cpu_map == NULL) {
Jiri Olsa397721e2019-07-21 13:24:16 +02001770 empty_cpu_map = perf_cpu_map__dummy_new();
Arnaldo Carvalho de Meloc24ae6d2017-02-14 10:59:04 -03001771 if (empty_cpu_map == NULL)
1772 return -ENOMEM;
1773 }
1774
1775 cpus = empty_cpu_map;
1776 }
1777
1778 if (threads == NULL) {
Arnaldo Carvalho de Meloc24ae6d2017-02-14 10:59:04 -03001779 if (empty_thread_map == NULL) {
1780 empty_thread_map = thread_map__new_by_tid(-1);
1781 if (empty_thread_map == NULL)
1782 return -ENOMEM;
1783 }
1784
1785 threads = empty_thread_map;
1786 }
1787
Jiri Olsa648b5af2019-08-06 11:35:19 +02001788 if (evsel->core.system_wide)
Adrian Hunterbf8e8f42014-07-31 09:00:51 +03001789 nthreads = 1;
1790 else
1791 nthreads = threads->nr;
1792
Jiri Olsa9dfcb752019-07-21 13:24:45 +02001793 if (evsel->core.fd == NULL &&
Ian Rogers44028692022-01-21 20:58:10 -08001794 perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001795 return -ENOMEM;
Arnaldo Carvalho de Melo4eed11d2011-01-04 00:13:17 -02001796
Riccardo Mancini46def082021-08-21 11:19:22 +02001797 evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
1798 if (evsel->cgrp)
1799 evsel->open_flags |= PERF_FLAG_PID_CGROUP;
1800
Riccardo Mancinid45ce0342021-08-21 11:19:21 +02001801 return 0;
1802}
1803
Riccardo Mancini588f4ac2021-08-21 11:19:23 +02001804static void evsel__disable_missing_features(struct evsel *evsel)
Riccardo Mancinid45ce0342021-08-21 11:19:21 +02001805{
Kan Liangea8d0ed2021-02-02 12:09:09 -08001806 if (perf_missing_features.weight_struct) {
1807 evsel__set_sample_bit(evsel, WEIGHT);
1808 evsel__reset_sample_bit(evsel, WEIGHT_STRUCT);
1809 }
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001810 if (perf_missing_features.clockid_wrong)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001811 evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001812 if (perf_missing_features.clockid) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001813 evsel->core.attr.use_clockid = 0;
1814 evsel->core.attr.clockid = 0;
Peter Zijlstra814c8c32015-03-31 00:19:31 +02001815 }
Yann Droneaud57480d22014-06-30 22:28:47 +02001816 if (perf_missing_features.cloexec)
Riccardo Mancini46def082021-08-21 11:19:22 +02001817 evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
Stephane Eranian5c5e8542013-08-21 12:10:25 +02001818 if (perf_missing_features.mmap2)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001819 evsel->core.attr.mmap2 = 0;
Namhyung Kim3500eee2021-11-05 13:58:47 -07001820 if (evsel->pmu && evsel->pmu->missing_features.exclude_guest)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001821 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
Andi Kleenbd0f8892015-12-11 16:12:24 -08001822 if (perf_missing_features.lbr_flags)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001823 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
Andi Kleenbd0f8892015-12-11 16:12:24 -08001824 PERF_SAMPLE_BRANCH_NO_CYCLES);
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001825 if (perf_missing_features.group_read && evsel->core.attr.inherit)
1826 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
Song Liu9aa0bfa2019-01-17 08:15:17 -08001827 if (perf_missing_features.ksymbol)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001828 evsel->core.attr.ksymbol = 0;
Arnaldo Carvalho de Melo74a1e862019-08-26 19:31:06 -03001829 if (perf_missing_features.bpf)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001830 evsel->core.attr.bpf_event = 0;
Kan Liangd3f85432020-02-28 08:30:01 -08001831 if (perf_missing_features.branch_hw_idx)
1832 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX;
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03001833 if (perf_missing_features.sample_id_all)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02001834 evsel->core.attr.sample_id_all = 0;
Riccardo Mancini588f4ac2021-08-21 11:19:23 +02001835}
1836
Riccardo Mancini6efd06e2021-08-21 11:19:24 +02001837int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
1838 struct perf_thread_map *threads)
1839{
1840 int err;
1841
1842 err = __evsel__prepare_open(evsel, cpus, threads);
1843 if (err)
1844 return err;
1845
1846 evsel__disable_missing_features(evsel);
1847
1848 return err;
1849}
1850
Riccardo Mancinid21fc5f2021-08-21 11:19:25 +02001851bool evsel__detect_missing_features(struct evsel *evsel)
1852{
1853 /*
1854 * Must probe features in the order they were added to the
1855 * perf_event_attr interface.
1856 */
1857 if (!perf_missing_features.weight_struct &&
1858 (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) {
1859 perf_missing_features.weight_struct = true;
1860 pr_debug2("switching off weight struct support\n");
1861 return true;
1862 } else if (!perf_missing_features.code_page_size &&
1863 (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) {
1864 perf_missing_features.code_page_size = true;
1865 pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n");
1866 return false;
1867 } else if (!perf_missing_features.data_page_size &&
1868 (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) {
1869 perf_missing_features.data_page_size = true;
1870 pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n");
1871 return false;
1872 } else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) {
1873 perf_missing_features.cgroup = true;
1874 pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n");
1875 return false;
1876 } else if (!perf_missing_features.branch_hw_idx &&
1877 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) {
1878 perf_missing_features.branch_hw_idx = true;
1879 pr_debug2("switching off branch HW index support\n");
1880 return true;
1881 } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
1882 perf_missing_features.aux_output = true;
1883 pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n");
1884 return false;
1885 } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) {
1886 perf_missing_features.bpf = true;
1887 pr_debug2_peo("switching off bpf_event\n");
1888 return true;
1889 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
1890 perf_missing_features.ksymbol = true;
1891 pr_debug2_peo("switching off ksymbol\n");
1892 return true;
1893 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
1894 perf_missing_features.write_backward = true;
1895 pr_debug2_peo("switching off write_backward\n");
1896 return false;
1897 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
1898 perf_missing_features.clockid_wrong = true;
1899 pr_debug2_peo("switching off clockid\n");
1900 return true;
1901 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
1902 perf_missing_features.clockid = true;
1903 pr_debug2_peo("switching off use_clockid\n");
1904 return true;
1905 } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) {
1906 perf_missing_features.cloexec = true;
1907 pr_debug2_peo("switching off cloexec flag\n");
1908 return true;
1909 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
1910 perf_missing_features.mmap2 = true;
1911 pr_debug2_peo("switching off mmap2\n");
1912 return true;
Namhyung Kim3500eee2021-11-05 13:58:47 -07001913 } else if ((evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) &&
1914 (evsel->pmu == NULL || evsel->pmu->missing_features.exclude_guest)) {
1915 if (evsel->pmu == NULL) {
1916 evsel->pmu = evsel__find_pmu(evsel);
1917 if (evsel->pmu)
1918 evsel->pmu->missing_features.exclude_guest = true;
1919 else {
1920 /* we cannot find PMU, disable attrs now */
1921 evsel->core.attr.exclude_host = false;
1922 evsel->core.attr.exclude_guest = false;
1923 }
1924 }
1925
1926 if (evsel->exclude_GH) {
1927 pr_debug2_peo("PMU has no exclude_host/guest support, bailing out\n");
1928 return false;
1929 }
1930 if (!perf_missing_features.exclude_guest) {
1931 perf_missing_features.exclude_guest = true;
1932 pr_debug2_peo("switching off exclude_guest, exclude_host\n");
1933 }
Riccardo Mancinid21fc5f2021-08-21 11:19:25 +02001934 return true;
1935 } else if (!perf_missing_features.sample_id_all) {
1936 perf_missing_features.sample_id_all = true;
1937 pr_debug2_peo("switching off sample_id_all\n");
1938 return true;
1939 } else if (!perf_missing_features.lbr_flags &&
1940 (evsel->core.attr.branch_sample_type &
1941 (PERF_SAMPLE_BRANCH_NO_CYCLES |
1942 PERF_SAMPLE_BRANCH_NO_FLAGS))) {
1943 perf_missing_features.lbr_flags = true;
1944 pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");
1945 return true;
1946 } else if (!perf_missing_features.group_read &&
1947 evsel->core.attr.inherit &&
1948 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
1949 evsel__is_group_leader(evsel)) {
1950 perf_missing_features.group_read = true;
1951 pr_debug2_peo("switching off group read\n");
1952 return true;
1953 } else {
1954 return false;
1955 }
1956}
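
/*
 * Illustrative sketch: how the probe above is driven by evsel__open_cpu()
 * further down.  On an EINVAL-style failure the newest attr feature still
 * enabled is switched off and the open is retried, so one binary keeps
 * working on older kernels.  Simplified standalone view; try_open() is a
 * placeholder and the real code only falls back for the first CPU/thread.
 */
static int open_with_feature_fallback_example(struct evsel *evsel,
					      int (*try_open)(struct evsel *evsel))
{
	int err;

	do {
		evsel__disable_missing_features(evsel);
		err = try_open(evsel);
	} while (err == -EINVAL && evsel__detect_missing_features(evsel));

	return err;
}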
1957
Riccardo Mancini71efc482021-08-21 11:19:26 +02001958bool evsel__increase_rlimit(enum rlimit_action *set_rlimit)
1959{
1960 int old_errno;
1961 struct rlimit l;
1962
1963 if (*set_rlimit < INCREASED_MAX) {
1964 old_errno = errno;
1965
1966 if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
1967 if (*set_rlimit == NO_CHANGE) {
1968 l.rlim_cur = l.rlim_max;
1969 } else {
1970 l.rlim_cur = l.rlim_max + 1000;
1971 l.rlim_max = l.rlim_cur;
1972 }
1973 if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
1974 (*set_rlimit) += 1;
1975 errno = old_errno;
1976 return true;
1977 }
1978 }
1979 errno = old_errno;
1980 }
1981
1982 return false;
1983}
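
/*
 * Illustrative sketch: the first, unprivileged step evsel__increase_rlimit()
 * takes, as a standalone helper.  Raising the soft RLIMIT_NOFILE limit up to
 * the hard limit is usually enough to get past -EMFILE when opening many
 * counter fds.
 */
#include <sys/resource.h>

static int raise_nofile_soft_limit_example(void)
{
	struct rlimit l;

	if (getrlimit(RLIMIT_NOFILE, &l) != 0)
		return -1;

	l.rlim_cur = l.rlim_max;	/* bump soft limit to the hard limit */
	return setrlimit(RLIMIT_NOFILE, &l);
}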
1984
Riccardo Mancini588f4ac2021-08-21 11:19:23 +02001985static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
1986 struct perf_thread_map *threads,
Ian Rogers6f844b12022-01-04 22:13:42 -08001987 int start_cpu_map_idx, int end_cpu_map_idx)
Riccardo Mancini588f4ac2021-08-21 11:19:23 +02001988{
Ian Rogers6f844b12022-01-04 22:13:42 -08001989 int idx, thread, nthreads;
Riccardo Mancini588f4ac2021-08-21 11:19:23 +02001990 int pid = -1, err, old_errno;
Riccardo Mancini71efc482021-08-21 11:19:26 +02001991 enum rlimit_action set_rlimit = NO_CHANGE;
Riccardo Mancini588f4ac2021-08-21 11:19:23 +02001992
1993 err = __evsel__prepare_open(evsel, cpus, threads);
1994 if (err)
1995 return err;
1996
1997 if (cpus == NULL)
1998 cpus = empty_cpu_map;
1999
2000 if (threads == NULL)
2001 threads = empty_thread_map;
2002
2003 if (evsel->core.system_wide)
2004 nthreads = 1;
2005 else
2006 nthreads = threads->nr;
2007
2008 if (evsel->cgrp)
2009 pid = evsel->cgrp->fd;
2010
2011fallback_missing_features:
2012 evsel__disable_missing_features(evsel);
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03002013
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002014 display_attr(&evsel->core.attr);
Adrian Huntere3e1a542013-08-14 15:48:24 +03002015
Ian Rogers6f844b12022-01-04 22:13:42 -08002016 for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
Arnaldo Carvalho de Melo9d04f172011-01-12 00:08:18 -02002017
Adrian Hunterbf8e8f42014-07-31 09:00:51 +03002018 for (thread = 0; thread < nthreads; thread++) {
Jiri Olsa83c2e4f32016-12-12 11:35:40 +01002019 int fd, group_fd;
Riccardo Mancinida7c3b42021-08-21 11:19:27 +02002020retry_open:
2021 if (thread >= nthreads)
2022 break;
Stephane Eranian023695d2011-02-14 11:20:01 +02002023
Jiri Olsa648b5af2019-08-06 11:35:19 +02002024 if (!evsel->cgrp && !evsel->core.system_wide)
Jiri Olsaa2f354e2019-08-22 13:11:41 +02002025 pid = perf_thread_map__pid(threads, thread);
Stephane Eranian023695d2011-02-14 11:20:01 +02002026
Ian Rogers6f844b12022-01-04 22:13:42 -08002027 group_fd = get_group_fd(evsel, idx, thread);
Riccardo Mancinida7c3b42021-08-21 11:19:27 +02002028
Jiri Olsa10213e22017-07-03 16:50:18 +02002029 test_attr__ready();
2030
Riccardo Mancini28667a52021-08-21 11:19:30 +02002031 pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
Ian Rogers44028692022-01-21 20:58:10 -08002032 pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);
Riccardo Mancini28667a52021-08-21 11:19:30 +02002033
Ian Rogers44028692022-01-21 20:58:10 -08002034 fd = sys_perf_event_open(&evsel->core.attr, pid,
2035 perf_cpu_map__cpu(cpus, idx).cpu,
Riccardo Mancini28667a52021-08-21 11:19:30 +02002036 group_fd, evsel->open_flags);
Jiri Olsa83c2e4f32016-12-12 11:35:40 +01002037
Ian Rogers6f844b12022-01-04 22:13:42 -08002038 FD(evsel, idx, thread) = fd;
Jiri Olsa83c2e4f32016-12-12 11:35:40 +01002039
2040 if (fd < 0) {
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02002041 err = -errno;
Jiri Olsaa359c172016-12-13 08:46:22 +01002042
Ravi Bangoriaccd26742019-11-08 15:11:28 +05302043 pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
Adrian Hunterf852fd62013-11-01 15:51:29 +02002044 err);
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03002045 goto try_fallback;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02002046 }
Wang Nan1f45b1d2015-10-14 12:41:18 +00002047
Ian Rogers6f844b12022-01-04 22:13:42 -08002048 bpf_counter__install_pe(evsel, idx, fd);
Riccardo Mancini91233d02021-08-21 11:19:29 +02002049
Riccardo Manciniebfb0452021-08-21 11:19:28 +02002050 if (unlikely(test_attr__enabled)) {
Ian Rogers44028692022-01-21 20:58:10 -08002051 test_attr__open(&evsel->core.attr, pid,
2052 perf_cpu_map__cpu(cpus, idx),
Riccardo Manciniebfb0452021-08-21 11:19:28 +02002053 fd, group_fd, evsel->open_flags);
2054 }
2055
Ravi Bangoriaccd26742019-11-08 15:11:28 +05302056 pr_debug2_peo(" = %d\n", fd);
Jiri Olsa7b4b82bc2016-11-21 22:33:26 +01002057
Wang Nan1f45b1d2015-10-14 12:41:18 +00002058 if (evsel->bpf_fd >= 0) {
Jiri Olsa83c2e4f32016-12-12 11:35:40 +01002059 int evt_fd = fd;
Wang Nan1f45b1d2015-10-14 12:41:18 +00002060 int bpf_fd = evsel->bpf_fd;
2061
2062 err = ioctl(evt_fd,
2063 PERF_EVENT_IOC_SET_BPF,
2064 bpf_fd);
2065 if (err && errno != EEXIST) {
2066 pr_err("failed to attach bpf fd %d: %s\n",
2067 bpf_fd, strerror(errno));
2068 err = -EINVAL;
2069 goto out_close;
2070 }
2071 }
2072
Andi Kleenbec19672013-08-04 19:41:26 -07002073 set_rlimit = NO_CHANGE;
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002074
2075 /*
2076 * If we succeeded but had to kill clockid, fail and
Arnaldo Carvalho de Melo2bb72db2020-05-04 13:43:03 -03002077 * have evsel__open_strerror() print us a nice error.
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002078 */
2079 if (perf_missing_features.clockid ||
2080 perf_missing_features.clockid_wrong) {
2081 err = -EINVAL;
2082 goto out_close;
2083 }
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -02002084 }
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02002085 }
2086
2087 return 0;
2088
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03002089try_fallback:
Riccardo Mancini28667a52021-08-21 11:19:30 +02002090 if (evsel__precise_ip_fallback(evsel))
2091 goto retry_open;
2092
Ian Rogers44028692022-01-21 20:58:10 -08002093 if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
2094 idx, threads, thread, err)) {
Riccardo Mancinida7c3b42021-08-21 11:19:27 +02002095 /* We just removed 1 thread, so lower the upper nthreads limit. */
2096 nthreads--;
2097
 2098 /* ... and pretend like nothing has happened. */
2099 err = 0;
2100 goto retry_open;
2101 }
Andi Kleenbec19672013-08-04 19:41:26 -07002102 /*
2103 * perf stat needs between 5 and 22 fds per CPU. When we run out
 2104 * of them, try to increase the limits.
2105 */
Riccardo Mancini71efc482021-08-21 11:19:26 +02002106 if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit))
2107 goto retry_open;
Andi Kleenbec19672013-08-04 19:41:26 -07002108
Ian Rogers6f844b12022-01-04 22:13:42 -08002109 if (err != -EINVAL || idx > 0 || thread > 0)
Arnaldo Carvalho de Melo594ac612012-12-13 13:13:07 -03002110 goto out_close;
2111
Riccardo Mancinid21fc5f2021-08-21 11:19:25 +02002112 if (evsel__detect_missing_features(evsel))
Kan Liangea8d0ed2021-02-02 12:09:09 -08002113 goto fallback_missing_features;
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02002114out_close:
Jin Yaoab6c79b2018-01-16 23:43:08 +08002115 if (err)
2116 threads->err_thread = thread;
2117
Andi Kleen796c01a2019-10-20 10:51:54 -07002118 old_errno = errno;
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -02002119 do {
2120 while (--thread >= 0) {
Ian Rogers6f844b12022-01-04 22:13:42 -08002121 if (FD(evsel, idx, thread) >= 0)
2122 close(FD(evsel, idx, thread));
2123 FD(evsel, idx, thread) = -1;
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -02002124 }
Adrian Hunterbf8e8f42014-07-31 09:00:51 +03002125 thread = nthreads;
Ian Rogers6f844b12022-01-04 22:13:42 -08002126 } while (--idx >= 0);
Andi Kleen796c01a2019-10-20 10:51:54 -07002127 errno = old_errno;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02002128 return err;
2129}
2130
Andi Kleen4804e012019-11-20 16:15:19 -08002131int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
2132 struct perf_thread_map *threads)
2133{
Ian Rogers44028692022-01-21 20:58:10 -08002134 return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
Andi Kleen4804e012019-11-20 16:15:19 -08002135}
2136
Jiri Olsa88761fa2019-07-21 13:24:50 +02002137void evsel__close(struct evsel *evsel)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02002138{
Jiri Olsa88761fa2019-07-21 13:24:50 +02002139 perf_evsel__close(&evsel->core);
Jiri Olsa70c20362019-09-03 10:34:29 +02002140 perf_evsel__free_id(&evsel->core);
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02002141}
2142
Ian Rogers6f844b12022-01-04 22:13:42 -08002143int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -02002144{
Ian Rogers6f844b12022-01-04 22:13:42 -08002145 if (cpu_map_idx == -1)
Ian Rogers44028692022-01-21 20:58:10 -08002146 return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
Andi Kleen4804e012019-11-20 16:15:19 -08002147
Ian Rogers6f844b12022-01-04 22:13:42 -08002148 return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -02002149}
2150
Arnaldo Carvalho de Meloaa8c4062020-04-29 16:21:03 -03002151int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02002152{
Jiri Olsa5972d1e2019-07-21 13:24:01 +02002153 return evsel__open(evsel, NULL, threads);
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -02002154}
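
/*
 * Illustrative sketch: a minimal open/enable/read/close lifecycle built only
 * from functions defined in this file.  The evsel and thread map are assumed
 * to have been set up by the caller (e.g. via the usual evlist machinery);
 * error handling is collapsed for brevity.
 */
static int count_on_threads_example(struct evsel *evsel, struct perf_thread_map *threads)
{
	int err = evsel__open_per_thread(evsel, threads);

	if (err)
		return err;

	evsel__enable(evsel);
	/* ... let the workload run ... */
	evsel__disable(evsel);

	err = evsel__read_counter(evsel, 0, 0);	/* first CPU map index, first thread */
	evsel__close(evsel);
	return err;
}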
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -02002155
Jiri Olsa32dcd022019-07-21 13:23:51 +02002156static int perf_evsel__parse_id_sample(const struct evsel *evsel,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03002157 const union perf_event *event,
2158 struct perf_sample *sample)
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002159{
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002160 u64 type = evsel->core.attr.sample_type;
Jiri Olsab1fcd192019-08-25 20:17:52 +02002161 const __u64 *array = event->sample.array;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03002162 bool swapped = evsel->needs_swap;
Jiri Olsa37073f92012-05-30 14:23:44 +02002163 union u64_swap u;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002164
2165 array += ((event->header.size -
2166 sizeof(event->header)) / sizeof(u64)) - 1;
2167
Adrian Hunter75562572013-08-27 11:23:09 +03002168 if (type & PERF_SAMPLE_IDENTIFIER) {
2169 sample->id = *array;
2170 array--;
2171 }
2172
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002173 if (type & PERF_SAMPLE_CPU) {
Jiri Olsa37073f92012-05-30 14:23:44 +02002174 u.val64 = *array;
2175 if (swapped) {
2176 /* undo swap of u64, then swap on individual u32s */
2177 u.val64 = bswap_64(u.val64);
2178 u.val32[0] = bswap_32(u.val32[0]);
2179 }
2180
2181 sample->cpu = u.val32[0];
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002182 array--;
2183 }
2184
2185 if (type & PERF_SAMPLE_STREAM_ID) {
2186 sample->stream_id = *array;
2187 array--;
2188 }
2189
2190 if (type & PERF_SAMPLE_ID) {
2191 sample->id = *array;
2192 array--;
2193 }
2194
2195 if (type & PERF_SAMPLE_TIME) {
2196 sample->time = *array;
2197 array--;
2198 }
2199
2200 if (type & PERF_SAMPLE_TID) {
Jiri Olsa37073f92012-05-30 14:23:44 +02002201 u.val64 = *array;
2202 if (swapped) {
2203 /* undo swap of u64, then swap on individual u32s */
2204 u.val64 = bswap_64(u.val64);
2205 u.val32[0] = bswap_32(u.val32[0]);
2206 u.val32[1] = bswap_32(u.val32[1]);
2207 }
2208
2209 sample->pid = u.val32[0];
2210 sample->tid = u.val32[1];
Adrian Hunterdd44bc62013-10-18 15:29:01 +03002211 array--;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002212 }
2213
2214 return 0;
2215}
2216
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002217static inline bool overflow(const void *endp, u16 max_size, const void *offset,
2218 u64 size)
Frederic Weisbecker98e1da92011-05-21 20:08:15 +02002219{
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002220 return size > max_size || offset + size > endp;
Frederic Weisbecker98e1da92011-05-21 20:08:15 +02002221}
2222
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002223#define OVERFLOW_CHECK(offset, size, max_size) \
2224 do { \
2225 if (overflow(endp, (max_size), (offset), (size))) \
2226 return -EFAULT; \
2227 } while (0)
2228
2229#define OVERFLOW_CHECK_u64(offset) \
2230 OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
2231
Jiri Olsa01468122017-08-03 13:10:28 +02002232static int
2233perf_event__check_size(union perf_event *event, unsigned int sample_size)
2234{
2235 /*
2236 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
2237 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
2238 * check the format does not go past the end of the event.
2239 */
2240 if (sample_size + sizeof(event->header) > event->header.size)
2241 return -EFAULT;
2242
2243 return 0;
2244}
2245
Kan Liangfbefe9c2021-02-05 08:01:52 -08002246void __weak arch_perf_parse_sample_weight(struct perf_sample *data,
2247 const __u64 *array,
2248 u64 type __maybe_unused)
2249{
2250 data->weight = *array;
2251}
2252
Madhavan Srinivasan63c12ae2021-10-28 17:07:13 +05302253u64 evsel__bitfield_swap_branch_flags(u64 value)
2254{
2255 u64 new_val = 0;
2256
2257 /*
2258 * branch_flags
2259 * union {
2260 * u64 values;
2261 * struct {
2262 * mispred:1 //target mispredicted
2263 * predicted:1 //target predicted
2264 * in_tx:1 //in transaction
2265 * abort:1 //transaction abort
2266 * cycles:16 //cycle count to last branch
2267 * type:4 //branch type
2268 * reserved:40
2269 * }
2270 * }
2271 *
 2272 * Avoid bswap64() on the entire branch_flag.value,
 2273 * as it has variable bit-field sizes. Instead the
 2274 * macro takes the bit-field position/size and
 2275 * swaps it based on the host endianness.
2276 *
2277 * tep_is_bigendian() is used here instead of
2278 * bigendian() to avoid python test fails.
2279 */
2280 if (tep_is_bigendian()) {
2281 new_val = bitfield_swap(value, 0, 1);
2282 new_val |= bitfield_swap(value, 1, 1);
2283 new_val |= bitfield_swap(value, 2, 1);
2284 new_val |= bitfield_swap(value, 3, 1);
2285 new_val |= bitfield_swap(value, 4, 16);
2286 new_val |= bitfield_swap(value, 20, 4);
2287 new_val |= bitfield_swap(value, 24, 40);
2288 } else {
2289 new_val = bitfield_swap(value, 63, 1);
2290 new_val |= bitfield_swap(value, 62, 1);
2291 new_val |= bitfield_swap(value, 61, 1);
2292 new_val |= bitfield_swap(value, 60, 1);
2293 new_val |= bitfield_swap(value, 44, 16);
2294 new_val |= bitfield_swap(value, 40, 4);
2295 new_val |= bitfield_swap(value, 0, 40);
2296 }
2297
2298 return new_val;
2299}
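
/*
 * Illustrative sketch: one standalone way to express the per-field move the
 * function above performs via the bitfield_swap() macro (this helper is
 * hypothetical, not the macro used here).  A field of 'size' bits starting
 * at bit 'pos' from one end of the u64 is placed at the mirrored position
 * measured from the other end, which is what byte-swapping a packed
 * bit-field layout amounts to.
 */
static u64 mirror_bitfield_example(u64 value, unsigned int pos, unsigned int size)
{
	u64 mask = (1ULL << size) - 1;		/* size is at most 40 here */
	u64 field = (value >> pos) & mask;

	return field << (64 - pos - size);
}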
2300
Arnaldo Carvalho de Melo6b6017a2020-04-30 11:03:49 -03002301int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
2302 struct perf_sample *data)
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002303{
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002304 u64 type = evsel->core.attr.sample_type;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03002305 bool swapped = evsel->needs_swap;
Jiri Olsab1fcd192019-08-25 20:17:52 +02002306 const __u64 *array;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002307 u16 max_size = event->header.size;
2308 const void *endp = (void *)event + max_size;
2309 u64 sz;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002310
David Ahern936be502011-09-06 09:12:26 -06002311 /*
2312 * used for cross-endian analysis. See git commit 65014ab3
2313 * for why this goofiness is needed.
2314 */
Jiri Olsa6a11f922012-05-16 08:59:04 +02002315 union u64_swap u;
David Ahern936be502011-09-06 09:12:26 -06002316
Robert Richterf3bda2c2011-12-15 17:32:39 +01002317 memset(data, 0, sizeof(*data));
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002318 data->cpu = data->pid = data->tid = -1;
2319 data->stream_id = data->id = data->time = -1ULL;
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002320 data->period = evsel->core.attr.sample_period;
Arnaldo Carvalho de Melo473398a2016-03-22 18:23:43 -03002321 data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
Jiri Olsa28a0b392018-01-07 17:03:52 +01002322 data->misc = event->header.misc;
Jiri Olsa3ad31d82017-08-03 16:07:05 +02002323 data->id = -1ULL;
2324 data->data_src = PERF_MEM_DATA_SRC_NONE;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002325
2326 if (event->header.type != PERF_RECORD_SAMPLE) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002327 if (!evsel->core.attr.sample_id_all)
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002328 return 0;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03002329 return perf_evsel__parse_id_sample(evsel, event, data);
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002330 }
2331
2332 array = event->sample.array;
2333
Jiri Olsa01468122017-08-03 13:10:28 +02002334 if (perf_event__check_size(event, evsel->sample_size))
Frederic Weisbeckera2854122011-05-21 19:33:04 +02002335 return -EFAULT;
2336
Adrian Hunter75562572013-08-27 11:23:09 +03002337 if (type & PERF_SAMPLE_IDENTIFIER) {
2338 data->id = *array;
2339 array++;
2340 }
2341
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002342 if (type & PERF_SAMPLE_IP) {
Adrian Hunteref893252013-08-27 11:23:06 +03002343 data->ip = *array;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002344 array++;
2345 }
2346
2347 if (type & PERF_SAMPLE_TID) {
David Ahern936be502011-09-06 09:12:26 -06002348 u.val64 = *array;
2349 if (swapped) {
2350 /* undo swap of u64, then swap on individual u32s */
2351 u.val64 = bswap_64(u.val64);
2352 u.val32[0] = bswap_32(u.val32[0]);
2353 u.val32[1] = bswap_32(u.val32[1]);
2354 }
2355
2356 data->pid = u.val32[0];
2357 data->tid = u.val32[1];
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002358 array++;
2359 }
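	/*
	 * Worked example (illustrative): a little-endian writer storing
	 * pid=0x01020304 and tid=0x0a0b0c0d emits the bytes
	 * 04 03 02 01 0d 0c 0b 0a.  On a big-endian reader the generic
	 * u64 byte swap applied earlier in the pipeline leaves
	 * u.val64 = 0x0a0b0c0d01020304 here; bswap_64() restores
	 * 0x040302010d0c0b0a and the two bswap_32() calls then put
	 * pid in val32[0] and tid in val32[1] in host byte order.
	 */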
2360
2361 if (type & PERF_SAMPLE_TIME) {
2362 data->time = *array;
2363 array++;
2364 }
2365
2366 if (type & PERF_SAMPLE_ADDR) {
2367 data->addr = *array;
2368 array++;
2369 }
2370
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002371 if (type & PERF_SAMPLE_ID) {
2372 data->id = *array;
2373 array++;
2374 }
2375
2376 if (type & PERF_SAMPLE_STREAM_ID) {
2377 data->stream_id = *array;
2378 array++;
2379 }
2380
2381 if (type & PERF_SAMPLE_CPU) {
David Ahern936be502011-09-06 09:12:26 -06002382
2383 u.val64 = *array;
2384 if (swapped) {
2385 /* undo swap of u64, then swap on individual u32s */
2386 u.val64 = bswap_64(u.val64);
2387 u.val32[0] = bswap_32(u.val32[0]);
2388 }
2389
2390 data->cpu = u.val32[0];
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002391 array++;
2392 }
2393
2394 if (type & PERF_SAMPLE_PERIOD) {
2395 data->period = *array;
2396 array++;
2397 }
2398
2399 if (type & PERF_SAMPLE_READ) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002400 u64 read_format = evsel->core.attr.read_format;
Jiri Olsa9ede4732012-10-10 17:38:13 +02002401
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002402 OVERFLOW_CHECK_u64(array);
Jiri Olsa9ede4732012-10-10 17:38:13 +02002403 if (read_format & PERF_FORMAT_GROUP)
2404 data->read.group.nr = *array;
2405 else
2406 data->read.one.value = *array;
2407
2408 array++;
2409
2410 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002411 OVERFLOW_CHECK_u64(array);
Jiri Olsa9ede4732012-10-10 17:38:13 +02002412 data->read.time_enabled = *array;
2413 array++;
2414 }
2415
2416 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002417 OVERFLOW_CHECK_u64(array);
Jiri Olsa9ede4732012-10-10 17:38:13 +02002418 data->read.time_running = *array;
2419 array++;
2420 }
2421
2422 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2423 if (read_format & PERF_FORMAT_GROUP) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002424 const u64 max_group_nr = UINT64_MAX /
2425 sizeof(struct sample_read_value);
2426
2427 if (data->read.group.nr > max_group_nr)
2428 return -EFAULT;
2429 sz = data->read.group.nr *
2430 sizeof(struct sample_read_value);
2431 OVERFLOW_CHECK(array, sz, max_size);
2432 data->read.group.values =
2433 (struct sample_read_value *)array;
2434 array = (void *)array + sz;
Jiri Olsa9ede4732012-10-10 17:38:13 +02002435 } else {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002436 OVERFLOW_CHECK_u64(array);
Jiri Olsa9ede4732012-10-10 17:38:13 +02002437 data->read.one.id = *array;
2438 array++;
2439 }
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002440 }
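	/*
	 * Consumption sketch (illustrative): with PERF_FORMAT_GROUP the
	 * values block is an array of { value, id } pairs that is only
	 * borrowed from the event, e.g.:
	 *
	 *	struct sample_read_value *v = data->read.group.values;
	 *	u64 i;
	 *
	 *	for (i = 0; i < data->read.group.nr; i++, v++)
	 *		printf("id %" PRIu64 " value %" PRIu64 "\n", v->id, v->value);
	 */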
2441
Adrian Hunter8e94b322020-04-01 13:16:07 +03002442 if (type & PERF_SAMPLE_CALLCHAIN) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002443 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
2444
2445 OVERFLOW_CHECK_u64(array);
2446 data->callchain = (struct ip_callchain *)array++;
2447 if (data->callchain->nr > max_callchain_nr)
Frederic Weisbecker98e1da92011-05-21 20:08:15 +02002448 return -EFAULT;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002449 sz = data->callchain->nr * sizeof(u64);
2450 OVERFLOW_CHECK(array, sz, max_size);
2451 array = (void *)array + sz;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002452 }
2453
2454 if (type & PERF_SAMPLE_RAW) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002455 OVERFLOW_CHECK_u64(array);
David Ahern936be502011-09-06 09:12:26 -06002456 u.val64 = *array;
Jiri Olsaf9d8adb2017-11-29 19:43:46 +01002457
2458 /*
2459 * Undo swap of u64, then swap on individual u32s,
2460 * get the size of the raw area and undo all of the
Ingo Molnar4d39c892021-03-23 17:09:15 +01002461 * swap. The pevent interface handles endianness by
Jiri Olsaf9d8adb2017-11-29 19:43:46 +01002462 * itself.
2463 */
2464 if (swapped) {
David Ahern936be502011-09-06 09:12:26 -06002465 u.val64 = bswap_64(u.val64);
2466 u.val32[0] = bswap_32(u.val32[0]);
2467 u.val32[1] = bswap_32(u.val32[1]);
2468 }
David Ahern936be502011-09-06 09:12:26 -06002469 data->raw_size = u.val32[0];
Jiri Olsaf9d8adb2017-11-29 19:43:46 +01002470
2471 /*
2472 * The raw data is aligned on 64bits including the
2473 * u32 size, so it's safe to use mem_bswap_64.
2474 */
2475 if (swapped)
2476 mem_bswap_64((void *) array, data->raw_size);
2477
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002478 array = (void *)array + sizeof(u32);
Frederic Weisbecker98e1da92011-05-21 20:08:15 +02002479
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002480 OVERFLOW_CHECK(array, data->raw_size, max_size);
2481 data->raw_data = (void *)array;
2482 array = (void *)array + data->raw_size;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002483 }
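	/*
	 * Layout note (illustrative): the RAW area is a u32 size immediately
	 * followed by the payload, padded by the kernel so that the whole
	 * { u32 size; char data[size]; pad } block stays u64 aligned, which
	 * is why the cursor advances by sizeof(u32) and then by raw_size
	 * rather than by whole u64s.
	 */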
2484
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01002485 if (type & PERF_SAMPLE_BRANCH_STACK) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002486 const u64 max_branch_nr = UINT64_MAX /
2487 sizeof(struct branch_entry);
Madhavan Srinivasan63c12ae2021-10-28 17:07:13 +05302488 struct branch_entry *e;
2489 unsigned int i;
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01002490
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002491 OVERFLOW_CHECK_u64(array);
2492 data->branch_stack = (struct branch_stack *)array++;
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01002493
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002494 if (data->branch_stack->nr > max_branch_nr)
2495 return -EFAULT;
Kan Liang42bbabe2020-02-28 08:30:00 -08002496
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01002497 sz = data->branch_stack->nr * sizeof(struct branch_entry);
Madhavan Srinivasan63c12ae2021-10-28 17:07:13 +05302498 if (evsel__has_branch_hw_idx(evsel)) {
Kan Liang42bbabe2020-02-28 08:30:00 -08002499 sz += sizeof(u64);
Madhavan Srinivasan63c12ae2021-10-28 17:07:13 +05302500 e = &data->branch_stack->entries[0];
2501 } else {
Kan Liang42bbabe2020-02-28 08:30:00 -08002502 data->no_hw_idx = true;
Madhavan Srinivasan63c12ae2021-10-28 17:07:13 +05302503 /*
2504			 * If PERF_SAMPLE_BRANCH_HW_INDEX is not applied,
2505			 * only nr and entries[] will be output by the kernel.
2506 */
2507 e = (struct branch_entry *)&data->branch_stack->hw_idx;
2508 }
2509
2510 if (swapped) {
2511			 * struct branch_flags does not have an endian-
2512			 * specific bit-field definition, and a plain bswap
2513			 * will not resolve the issue, since these
2514			 * are bit fields.
2515			 *
2516			 * evsel__bitfield_swap_branch_flags() uses the
2517			 * bitfield_swap() macro to swap each bit position
2518			 * based on the host endianness.
2519 * based on the host endians.
2520 */
2521 for (i = 0; i < data->branch_stack->nr; i++, e++)
2522 e->flags.value = evsel__bitfield_swap_branch_flags(e->flags.value);
2523 }
2524
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002525 OVERFLOW_CHECK(array, sz, max_size);
2526 array = (void *)array + sz;
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01002527 }
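	/*
	 * Consumption sketch (illustrative): after parsing, the entries start
	 * either at branch_stack->entries[] or, when no hw_idx u64 was
	 * recorded, one u64 earlier, exactly as computed for `e` above:
	 *
	 *	struct branch_entry *br = evsel__has_branch_hw_idx(evsel) ?
	 *		&data->branch_stack->entries[0] :
	 *		(struct branch_entry *)&data->branch_stack->hw_idx;
	 *	u64 i;
	 *
	 *	for (i = 0; i < data->branch_stack->nr; i++)
	 *		printf("%#" PRIx64 " -> %#" PRIx64 "\n", br[i].from, br[i].to);
	 */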
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002528
2529 if (type & PERF_SAMPLE_REGS_USER) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002530 OVERFLOW_CHECK_u64(array);
Adrian Hunter5b95a4a32013-08-27 11:23:10 +03002531 data->user_regs.abi = *array;
2532 array++;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002533
Adrian Hunter5b95a4a32013-08-27 11:23:10 +03002534 if (data->user_regs.abi) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002535 u64 mask = evsel->core.attr.sample_regs_user;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002536
Mao Han3a5b64f2019-04-10 16:16:43 +08002537 sz = hweight64(mask) * sizeof(u64);
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002538 OVERFLOW_CHECK(array, sz, max_size);
Jiri Olsa352ea452014-01-07 13:47:25 +01002539 data->user_regs.mask = mask;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002540 data->user_regs.regs = (u64 *)array;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002541 array = (void *)array + sz;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002542 }
2543 }
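	/*
	 * Lookup sketch (illustrative, hypothetical helper): the regs array is
	 * packed, holding only the registers whose bits are set in the mask,
	 * in ascending bit order, so a consumer indexes it by counting the set
	 * bits below the register it wants:
	 *
	 *	static u64 regs_dump__get(struct regs_dump *regs, int id)
	 *	{
	 *		if (!(regs->mask & BIT_ULL(id)))
	 *			return 0;
	 *		return regs->regs[hweight64(regs->mask & (BIT_ULL(id) - 1))];
	 *	}
	 */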
2544
2545 if (type & PERF_SAMPLE_STACK_USER) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002546 OVERFLOW_CHECK_u64(array);
2547 sz = *array++;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002548
2549 data->user_stack.offset = ((char *)(array - 1)
2550 - (char *) event);
2551
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002552 if (!sz) {
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002553 data->user_stack.size = 0;
2554 } else {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002555 OVERFLOW_CHECK(array, sz, max_size);
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002556 data->user_stack.data = (char *)array;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002557 array = (void *)array + sz;
2558 OVERFLOW_CHECK_u64(array);
Adrian Hunter54bd2692013-07-04 16:20:34 +03002559 data->user_stack.size = *array++;
Jiri Olsaa65cb4b2013-10-02 15:46:39 +02002560 if (WARN_ONCE(data->user_stack.size > sz,
2561 "user stack dump failure\n"))
2562 return -EFAULT;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002563 }
2564 }
2565
Kan Liangea8d0ed2021-02-02 12:09:09 -08002566 if (type & PERF_SAMPLE_WEIGHT_TYPE) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002567 OVERFLOW_CHECK_u64(array);
Kan Liangfbefe9c2021-02-05 08:01:52 -08002568 arch_perf_parse_sample_weight(data, array, type);
Andi Kleen05484292013-01-24 16:10:29 +01002569 array++;
2570 }
2571
Stephane Eranian98a3b322013-01-24 16:10:35 +01002572 if (type & PERF_SAMPLE_DATA_SRC) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03002573 OVERFLOW_CHECK_u64(array);
Stephane Eranian98a3b322013-01-24 16:10:35 +01002574 data->data_src = *array;
2575 array++;
2576 }
2577
Andi Kleen475eeab2013-09-20 07:40:43 -07002578 if (type & PERF_SAMPLE_TRANSACTION) {
Adrian Hunter87b95522013-11-01 15:51:36 +02002579 OVERFLOW_CHECK_u64(array);
Andi Kleen475eeab2013-09-20 07:40:43 -07002580 data->transaction = *array;
2581 array++;
2582 }
2583
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02002584 data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
2585 if (type & PERF_SAMPLE_REGS_INTR) {
2586 OVERFLOW_CHECK_u64(array);
2587 data->intr_regs.abi = *array;
2588 array++;
2589
2590 if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002591 u64 mask = evsel->core.attr.sample_regs_intr;
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02002592
Mao Han3a5b64f2019-04-10 16:16:43 +08002593 sz = hweight64(mask) * sizeof(u64);
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02002594 OVERFLOW_CHECK(array, sz, max_size);
2595 data->intr_regs.mask = mask;
2596 data->intr_regs.regs = (u64 *)array;
2597 array = (void *)array + sz;
2598 }
2599 }
2600
Kan Liang3b0a5da2017-08-29 13:11:08 -04002601 data->phys_addr = 0;
2602 if (type & PERF_SAMPLE_PHYS_ADDR) {
2603 data->phys_addr = *array;
2604 array++;
2605 }
2606
Namhyung Kimba78c1c2020-03-25 21:45:30 +09002607 data->cgroup = 0;
2608 if (type & PERF_SAMPLE_CGROUP) {
2609 data->cgroup = *array;
2610 array++;
2611 }
2612
Kan Liang542b88f2020-11-30 09:27:53 -08002613 data->data_page_size = 0;
2614 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
2615 data->data_page_size = *array;
2616 array++;
2617 }
2618
Kan Liangc1de7f32021-01-05 11:57:49 -08002619 data->code_page_size = 0;
2620 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
2621 data->code_page_size = *array;
2622 array++;
2623 }
2624
Adrian Hunter98dcf142019-11-15 14:42:11 +02002625 if (type & PERF_SAMPLE_AUX) {
2626 OVERFLOW_CHECK_u64(array);
2627 sz = *array++;
2628
2629 OVERFLOW_CHECK(array, sz, max_size);
2630 /* Undo swap of data */
2631 if (swapped)
2632 mem_bswap_64((char *)array, sz);
2633 data->aux_sample.size = sz;
2634 data->aux_sample.data = (char *)array;
2635 array = (void *)array + sz;
2636 }
2637
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02002638 return 0;
2639}
Andrew Vagin74eec262011-11-28 12:03:31 +03002640
Arnaldo Carvalho de Melo6b6017a2020-04-30 11:03:49 -03002641int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
2642 u64 *timestamp)
Jiri Olsa01468122017-08-03 13:10:28 +02002643{
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002644 u64 type = evsel->core.attr.sample_type;
Jiri Olsab1fcd192019-08-25 20:17:52 +02002645 const __u64 *array;
Jiri Olsa01468122017-08-03 13:10:28 +02002646
2647 if (!(type & PERF_SAMPLE_TIME))
2648 return -1;
2649
2650 if (event->header.type != PERF_RECORD_SAMPLE) {
2651 struct perf_sample data = {
2652 .time = -1ULL,
2653 };
2654
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002655 if (!evsel->core.attr.sample_id_all)
Jiri Olsa01468122017-08-03 13:10:28 +02002656 return -1;
2657 if (perf_evsel__parse_id_sample(evsel, event, &data))
2658 return -1;
2659
2660 *timestamp = data.time;
2661 return 0;
2662 }
2663
2664 array = event->sample.array;
2665
2666 if (perf_event__check_size(event, evsel->sample_size))
2667 return -EFAULT;
2668
2669 if (type & PERF_SAMPLE_IDENTIFIER)
2670 array++;
2671
2672 if (type & PERF_SAMPLE_IP)
2673 array++;
2674
2675 if (type & PERF_SAMPLE_TID)
2676 array++;
2677
2678 if (type & PERF_SAMPLE_TIME)
2679 *timestamp = *array;
2680
2681 return 0;
2682}
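/*
 * Usage sketch (illustrative): this helper lets callers peek at just the
 * timestamp, e.g. to order events, without paying for a full sample parse:
 *
 *	u64 ts;
 *
 *	if (evsel__parse_sample_timestamp(evsel, event, &ts) == 0)
 *		queue_event_by_time(event, ts);	// hypothetical consumer
 */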
2683
Arnaldo Carvalho de Meloefc0cdc2020-04-29 16:26:57 -03002684struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03002685{
Tzvetomir Stoyanov (VMware)af85cd12018-08-08 14:02:50 -04002686 return tep_find_field(evsel->tp_format, name);
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03002687}
2688
Arnaldo Carvalho de Meloefc0cdc2020-04-29 16:26:57 -03002689void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002690{
Arnaldo Carvalho de Meloefc0cdc2020-04-29 16:26:57 -03002691 struct tep_format_field *field = evsel__field(evsel, name);
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002692 int offset;
2693
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03002694 if (!field)
2695 return NULL;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002696
2697 offset = field->offset;
2698
Tzvetomir Stoyanov (VMware)bb39ccb2018-09-19 14:56:46 -04002699 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002700 offset = *(int *)(sample->raw_data + field->offset);
2701 offset &= 0xffff;
Masami Hiramatsu7c689c82021-11-22 18:30:48 +09002702 if (field->flags & TEP_FIELD_IS_RELATIVE)
2703 offset += field->offset + field->size;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002704 }
2705
2706 return sample->raw_data + offset;
2707}
2708
Tzvetomir Stoyanov (VMware)2c92f982018-09-19 14:56:45 -04002709u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
Arnaldo Carvalho de Melo90525172016-05-31 12:47:46 -03002710 bool needs_swap)
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002711{
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03002712 u64 value;
Arnaldo Carvalho de Melo90525172016-05-31 12:47:46 -03002713 void *ptr = sample->raw_data + field->offset;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002714
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03002715 switch (field->size) {
2716 case 1:
2717 return *(u8 *)ptr;
2718 case 2:
2719 value = *(u16 *)ptr;
2720 break;
2721 case 4:
2722 value = *(u32 *)ptr;
2723 break;
2724 case 8:
David Aherne94eeda2015-03-24 16:14:09 -04002725 memcpy(&value, ptr, sizeof(u64));
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03002726 break;
2727 default:
2728 return 0;
2729 }
2730
Arnaldo Carvalho de Melo90525172016-05-31 12:47:46 -03002731 if (!needs_swap)
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03002732 return value;
2733
2734 switch (field->size) {
2735 case 2:
2736 return bswap_16(value);
2737 case 4:
2738 return bswap_32(value);
2739 case 8:
2740 return bswap_64(value);
2741 default:
2742 return 0;
2743 }
2744
2745 return 0;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03002746}
Arnaldo Carvalho de Melo0698aed2012-12-10 18:17:08 -03002747
Arnaldo Carvalho de Meloefc0cdc2020-04-29 16:26:57 -03002748u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name)
Arnaldo Carvalho de Melo90525172016-05-31 12:47:46 -03002749{
Arnaldo Carvalho de Meloefc0cdc2020-04-29 16:26:57 -03002750 struct tep_format_field *field = evsel__field(evsel, name);
Arnaldo Carvalho de Melo90525172016-05-31 12:47:46 -03002751
2752 if (!field)
2753 return 0;
2754
2755	return format_field__intval(field, sample, evsel->needs_swap);
2756}
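/*
 * Usage sketch (illustrative, assuming a sched:sched_switch tracepoint
 * with its usual field names): tracepoint handlers combine these helpers
 * to pull typed values out of the raw data, e.g.:
 *
 *	const char *prev_comm = evsel__rawptr(evsel, sample, "prev_comm");
 *	u64 prev_pid = evsel__intval(evsel, sample, "prev_pid");
 *	u64 next_pid = evsel__intval(evsel, sample, "next_pid");
 */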
2757
Arnaldo Carvalho de Meloae430892020-04-30 11:46:15 -03002758bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002759{
Arnaldo Carvalho de Melo08094822016-05-12 16:07:47 -03002760 int paranoid;
2761
David Ahern2b821cc2013-07-18 17:27:59 -06002762 if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002763 evsel->core.attr.type == PERF_TYPE_HARDWARE &&
2764 evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002765 /*
2766 * If it's cycles then fall back to hrtimer based
2767 * cpu-clock-tick sw counter, which is always available even if
2768 * no PMU support.
2769 *
2770 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
2771 * b0a873e).
2772 */
2773 scnprintf(msg, msgsize, "%s",
2774"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
2775
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002776 evsel->core.attr.type = PERF_TYPE_SOFTWARE;
2777 evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002778
Arnaldo Carvalho de Melo04662522013-12-26 17:41:15 -03002779 zfree(&evsel->name);
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002780 return true;
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002781 } else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
Arnaldo Carvalho de Melo08094822016-05-12 16:07:47 -03002782 (paranoid = perf_event_paranoid()) > 1) {
Arnaldo Carvalho de Melo8ab2e962020-04-29 16:07:09 -03002783 const char *name = evsel__name(evsel);
Arnaldo Carvalho de Melo08094822016-05-12 16:07:47 -03002784 char *new_name;
Jiri Olsa129193b2018-04-23 11:08:17 +02002785 const char *sep = ":";
Arnaldo Carvalho de Melo08094822016-05-12 16:07:47 -03002786
Stephane Eranianbec49a92020-04-14 09:15:50 -07002787		/* If the event already has exclude_user set, don't also exclude the kernel. */
2788 if (evsel->core.attr.exclude_user)
2789 return false;
2790
Jiri Olsa129193b2018-04-23 11:08:17 +02002791		/* Is the separator already in the name? */
2792 if (strchr(name, '/') ||
Stephane Eranian70943492020-05-05 11:29:43 -07002793 (strchr(name, ':') && !evsel->is_libpfm_event))
Jiri Olsa129193b2018-04-23 11:08:17 +02002794 sep = "";
2795
2796 if (asprintf(&new_name, "%s%su", name, sep) < 0)
Arnaldo Carvalho de Melo08094822016-05-12 16:07:47 -03002797 return false;
2798
2799 if (evsel->name)
2800 free(evsel->name);
2801 evsel->name = new_name;
Stephane Eranian4ec8d982019-09-20 16:03:56 -07002802 scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
2803 "to fall back to excluding kernel and hypervisor "
2804				"samples", paranoid);
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002805 evsel->core.attr.exclude_kernel = 1;
Stephane Eranian4ec8d982019-09-20 16:03:56 -07002806 evsel->core.attr.exclude_hv = 1;
Arnaldo Carvalho de Melo08094822016-05-12 16:07:47 -03002807
2808 return true;
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002809 }
2810
2811 return false;
2812}
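/*
 * Usage sketch (illustrative): callers typically retry the open after a
 * successful fallback, roughly:
 *
 *	char msg[BUFSIZ];
 *
 *	while (evsel__open(evsel, cpus, threads) < 0) {
 *		if (evsel__fallback(evsel, errno, msg, sizeof(msg))) {
 *			pr_warning("%s\n", msg);
 *			continue;
 *		}
 *		break;	// report via evsel__open_strerror()
 *	}
 */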
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002813
Arnaldo Carvalho de Melo2157f6e2017-06-20 12:05:38 -03002814static bool find_process(const char *name)
2815{
2816 size_t len = strlen(name);
2817 DIR *dir;
2818 struct dirent *d;
2819 int ret = -1;
2820
2821 dir = opendir(procfs__mountpoint());
2822 if (!dir)
2823 return false;
2824
2825 /* Walk through the directory. */
2826 while (ret && (d = readdir(dir)) != NULL) {
2827 char path[PATH_MAX];
2828 char *data;
2829 size_t size;
2830
2831 if ((d->d_type != DT_DIR) ||
2832 !strcmp(".", d->d_name) ||
2833 !strcmp("..", d->d_name))
2834 continue;
2835
2836 scnprintf(path, sizeof(path), "%s/%s/comm",
2837 procfs__mountpoint(), d->d_name);
2838
2839 if (filename__read_str(path, &data, &size))
2840 continue;
2841
2842 ret = strncmp(name, data, len);
2843 free(data);
2844 }
2845
2846 closedir(dir);
2847 return ret ? false : true;
2848}
2849
Arnaldo Carvalho de Melo2bb72db2020-05-04 13:43:03 -03002850int evsel__open_strerror(struct evsel *evsel, struct target *target,
2851 int err, char *msg, size_t size)
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002852{
Masami Hiramatsu6e81c742014-08-14 02:22:36 +00002853 char sbuf[STRERR_BUFSIZE];
Alexey Budankovc1034eb2020-04-30 10:15:57 +03002854 int printed = 0, enforced = 0;
Masami Hiramatsu6e81c742014-08-14 02:22:36 +00002855
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002856 switch (err) {
2857 case EPERM:
2858 case EACCES:
Alexey Budankovc1034eb2020-04-30 10:15:57 +03002859 printed += scnprintf(msg + printed, size - printed,
2860 "Access to performance monitoring and observability operations is limited.\n");
2861
2862 if (!sysfs__read_int("fs/selinux/enforce", &enforced)) {
2863 if (enforced) {
2864 printed += scnprintf(msg + printed, size - printed,
2865 "Enforced MAC policy settings (SELinux) can limit access to performance\n"
2866 "monitoring and observability operations. Inspect system audit records for\n"
2867			"more perf_event access control information and for adjusting the policy.\n");
2868 }
2869 }
2870
Jin Yao32ccb132017-04-07 20:08:52 +08002871 if (err == EPERM)
Alexey Budankovc1034eb2020-04-30 10:15:57 +03002872 printed += scnprintf(msg, size,
Arnaldo Carvalho de Melo8ab2e962020-04-29 16:07:09 -03002873 "No permission to enable %s event.\n\n", evsel__name(evsel));
Jin Yao32ccb132017-04-07 20:08:52 +08002874
2875 return scnprintf(msg + printed, size - printed,
Alexey Budankovc1034eb2020-04-30 10:15:57 +03002876 "Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n"
Alexey Budankov4b0297e2020-08-05 10:31:20 +03002877 "access to performance monitoring and observability operations for processes\n"
2878 "without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n"
2879 "More information can be found at 'Perf events and tool security' document:\n"
2880 "https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html\n"
Alexey Budankovc1034eb2020-04-30 10:15:57 +03002881 "perf_event_paranoid setting is %d:\n"
Ben Hutchings3379e0c2016-01-19 21:35:15 +00002882 " -1: Allow use of (almost) all events by all users\n"
Konstantin Khlebnikovac0bb6b2017-08-20 14:39:20 +03002883 " Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
Alexey Budankovc1034eb2020-04-30 10:15:57 +03002884 ">= 0: Disallow raw and ftrace function tracepoint access\n"
2885 ">= 1: Disallow CPU event access\n"
2886 ">= 2: Disallow kernel profiling\n"
2887 "To make the adjusted perf_event_paranoid setting permanent preserve it\n"
2888 "in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)",
2889 perf_event_paranoid());
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002890 case ENOENT:
Arnaldo Carvalho de Melo8ab2e962020-04-29 16:07:09 -03002891 return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel));
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002892 case EMFILE:
2893 return scnprintf(msg, size, "%s",
2894 "Too many events are opened.\n"
Jiri Olsa18ffdfe2015-05-25 22:51:54 +02002895 "Probably the maximum number of open file descriptors has been reached.\n"
2896 "Hint: Try again after reducing the number of events.\n"
2897 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
Arnaldo Carvalho de Melode46d522016-04-27 17:51:45 -03002898 case ENOMEM:
Arnaldo Carvalho de Melo27de9b22018-05-28 16:00:29 -03002899 if (evsel__has_callchain(evsel) &&
Arnaldo Carvalho de Melode46d522016-04-27 17:51:45 -03002900 access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
2901 return scnprintf(msg, size,
2902 "Not enough memory to setup event with callchain.\n"
2903 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
Arnaldo Carvalho de Melo029c75e2018-05-17 16:31:32 -03002904 "Hint: Current value: %d", sysctl__max_stack());
Arnaldo Carvalho de Melode46d522016-04-27 17:51:45 -03002905 break;
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002906 case ENODEV:
2907 if (target->cpu_list)
2908 return scnprintf(msg, size, "%s",
Arnaldo Carvalho de Melo81d64f42016-04-27 17:56:53 -03002909 "No such device - did you specify an out-of-range profile CPU?");
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002910 break;
2911 case EOPNOTSUPP:
Adrian Hunter2c9a11a2020-07-10 18:10:55 +03002912 if (evsel->core.attr.aux_output)
2913 return scnprintf(msg, size,
2914 "%s: PMU Hardware doesn't support 'aux_output' feature",
2915 evsel__name(evsel));
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002916 if (evsel->core.attr.sample_period != 0)
Kim Phillips114bc192017-11-14 15:04:52 -06002917 return scnprintf(msg, size,
2918 "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
Arnaldo Carvalho de Melo8ab2e962020-04-29 16:07:09 -03002919 evsel__name(evsel));
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002920 if (evsel->core.attr.precise_ip)
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002921 return scnprintf(msg, size, "%s",
2922 "\'precise\' request may not be supported. Try removing 'p' modifier.");
2923#if defined(__i386__) || defined(__x86_64__)
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002924 if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002925 return scnprintf(msg, size, "%s",
Andi Kleenccbb6af2018-04-06 13:38:12 -07002926 "No hardware sampling interrupt available.\n");
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002927#endif
2928 break;
Jiri Olsa63914ac2014-08-01 17:46:54 +02002929 case EBUSY:
2930 if (find_process("oprofiled"))
2931 return scnprintf(msg, size,
2932 "The PMU counters are busy/taken by another profiler.\n"
2933 "We found oprofile daemon running, please stop it and try again.");
2934 break;
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002935 case EINVAL:
Kan Liangc1de7f32021-01-05 11:57:49 -08002936 if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size)
2937 return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel.");
Arnaldo Carvalho de Melo456ef4c12020-12-07 14:04:05 -03002938 if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size)
2939 return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel.");
Jiri Olsa1fc632c2019-07-21 13:24:29 +02002940 if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
Arnaldo Carvalho de Melo7da36e92016-06-20 10:47:18 +00002941 return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002942 if (perf_missing_features.clockid)
2943 return scnprintf(msg, size, "clockid feature not supported.");
2944 if (perf_missing_features.clockid_wrong)
2945 return scnprintf(msg, size, "wrong clockid (%d).", clockid);
Arnaldo Carvalho de Meloacb9f2d2019-08-13 11:06:38 -03002946 if (perf_missing_features.aux_output)
2947 return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
Ian Rogersdcffc5e2021-12-23 10:39:48 -08002948 if (!target__has_cpu(target))
2949 return scnprintf(msg, size,
2950 "Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
2951 evsel__name(evsel));
Peter Zijlstra814c8c32015-03-31 00:19:31 +02002952 break;
Kan Liang2a57d402021-02-02 12:09:06 -08002953 case ENODATA:
2954 return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. "
2955 "Please add an auxiliary event in front of the load latency event.");
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002956 default:
2957 break;
2958 }
2959
2960 return scnprintf(msg, size,
Masami Hiramatsu6e81c742014-08-14 02:22:36 +00002961 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
Andi Kleenec394842018-04-06 13:38:11 -07002962 "/bin/dmesg | grep -i perf may provide additional information.\n",
Arnaldo Carvalho de Melo8ab2e962020-04-29 16:07:09 -03002963 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel));
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002964}
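/*
 * Usage sketch (illustrative, variable names assumed): builtins generally
 * funnel a failed open through this helper before giving up, along the
 * lines of:
 *
 *	if (evsel__open(counter, cpus, threads) < 0) {
 *		char errbuf[BUFSIZ];
 *
 *		evsel__open_strerror(counter, &target, errno, errbuf, sizeof(errbuf));
 *		ui__error("%s\n", errbuf);
 *	}
 */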
Ravi Bangoriaf4e47f92016-06-30 11:44:19 +05302965
Arnaldo Carvalho de Melo6e6d1d62020-05-04 13:44:03 -03002966struct perf_env *evsel__env(struct evsel *evsel)
Jin Yao69fb09f2017-07-07 13:06:34 +08002967{
Arnaldo Carvalho de Melo5449f132017-12-11 12:46:11 -03002968 if (evsel && evsel->evlist)
2969 return evsel->evlist->env;
Arnaldo Carvalho de Melo9db0e362019-09-30 11:48:32 -03002970 return &perf_env;
Jin Yao69fb09f2017-07-07 13:06:34 +08002971}
Jiri Olsa650d6222018-08-30 08:32:16 +02002972
Jiri Olsa63503db2019-07-21 13:23:52 +02002973static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
Jiri Olsa650d6222018-08-30 08:32:16 +02002974{
Ian Rogers6f844b12022-01-04 22:13:42 -08002975 int cpu_map_idx, thread;
Jiri Olsa650d6222018-08-30 08:32:16 +02002976
Ian Rogers6f844b12022-01-04 22:13:42 -08002977 for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
Jiri Olsa9dfcb752019-07-21 13:24:45 +02002978 for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
Jiri Olsa650d6222018-08-30 08:32:16 +02002979 thread++) {
Ian Rogers6f844b12022-01-04 22:13:42 -08002980 int fd = FD(evsel, cpu_map_idx, thread);
Jiri Olsa650d6222018-08-30 08:32:16 +02002981
Jiri Olsad5a99482019-09-03 11:19:56 +02002982 if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
Ian Rogers6f844b12022-01-04 22:13:42 -08002983 cpu_map_idx, thread, fd) < 0)
Jiri Olsa650d6222018-08-30 08:32:16 +02002984 return -1;
2985 }
2986 }
2987
2988 return 0;
2989}
2990
Arnaldo Carvalho de Melo34397752020-05-04 13:45:19 -03002991int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
Jiri Olsa650d6222018-08-30 08:32:16 +02002992{
Jiri Olsad400bd32019-07-21 13:24:37 +02002993 struct perf_cpu_map *cpus = evsel->core.cpus;
Jiri Olsaaf663bd2019-07-21 13:24:39 +02002994 struct perf_thread_map *threads = evsel->core.threads;
Jiri Olsa650d6222018-08-30 08:32:16 +02002995
Ian Rogers44028692022-01-21 20:58:10 -08002996 if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
Jiri Olsa650d6222018-08-30 08:32:16 +02002997 return -ENOMEM;
2998
2999 return store_evsel_ids(evsel, evlist);
3000}
Jin Yao034f7ee2021-01-28 09:34:17 +08003001
3002void evsel__zero_per_pkg(struct evsel *evsel)
3003{
3004 struct hashmap_entry *cur;
3005 size_t bkt;
3006
3007 if (evsel->per_pkg_mask) {
3008 hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt)
3009 free((char *)cur->key);
3010
3011 hashmap__clear(evsel->per_pkg_mask);
3012 }
3013}
Jin Yao660e5332021-04-27 15:01:29 +08003014
3015bool evsel__is_hybrid(struct evsel *evsel)
3016{
3017 return evsel->pmu_name && perf_pmu__is_hybrid(evsel->pmu_name);
3018}
Jiri Olsafba7c862021-07-06 17:17:00 +02003019
3020struct evsel *evsel__leader(struct evsel *evsel)
3021{
3022 return container_of(evsel->core.leader, struct evsel, core);
3023}
3024
3025bool evsel__has_leader(struct evsel *evsel, struct evsel *leader)
3026{
3027 return evsel->core.leader == &leader->core;
3028}
3029
3030bool evsel__is_leader(struct evsel *evsel)
3031{
3032 return evsel__has_leader(evsel, evsel);
3033}
3034
3035void evsel__set_leader(struct evsel *evsel, struct evsel *leader)
3036{
3037 evsel->core.leader = &leader->core;
3038}
Ian Rogers9aba0ad2021-11-10 16:21:09 -08003039
3040int evsel__source_count(const struct evsel *evsel)
3041{
3042 struct evsel *pos;
3043 int count = 0;
3044
3045 evlist__for_each_entry(evsel->evlist, pos) {
3046 if (pos->metric_leader == evsel)
3047 count++;
3048 }
3049 return count;
3050}