// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-ftrace.c
 *
 * Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org>
 */

#include "builtin.h"

#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <fcntl.h>
#include <poll.h>
#include <linux/capability.h>
#include <linux/string.h>

#include "debug.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <api/fs/tracing_path.h>
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "util/cap.h"
#include "util/config.h"
#include "util/units.h"
#include "util/parse-sublevel-options.h"

#define DEFAULT_TRACER  "function_graph"

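/*
 * Options and target state collected from the command line and perfconfig;
 * __cmd_ftrace() turns these fields into writes to the tracefs control files.
 */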
struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;
	bool			list_avail_functions;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	int			graph_depth;
	unsigned long		percpu_buffer_size;
	bool			inherit;
	int			func_stack_trace;
	int			graph_nosleep_time;
	int			graph_noirqs;
};

struct filter_entry {
	struct list_head	list;
	char			name[];
};

static volatile int workload_exec_errno;
static bool done;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}

/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 * we asked by setting its exec_error to the function below,
 * ftrace__workload_exec_failed_signal.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
						siginfo_t *info __maybe_unused,
						void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = true;
}

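/*
 * Write @val to the tracefs file @name (e.g. "tracing_on" or
 * "set_ftrace_filter"), either truncating or appending.  A trailing
 * newline is appended so the kernel does not hide write errors.
 */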
static int __write_tracing_file(const char *name, const char *val, bool append)
{
	char *file;
	int fd, ret = -1;
	ssize_t size = strlen(val);
	int flags = O_WRONLY;
	char errbuf[512];
	char *val_copy;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	if (append)
		flags |= O_APPEND;
	else
		flags |= O_TRUNC;

	fd = open(file, flags);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out;
	}

	/*
	 * Copy the original value and append a '\n'. Without this,
	 * the kernel can hide possible errors.
	 */
	val_copy = strdup(val);
	if (!val_copy)
		goto out_close;
	val_copy[size] = '\n';

	if (write(fd, val_copy, size + 1) == size + 1)
		ret = 0;
	else
		pr_debug("write '%s' to tracing/%s failed: %s\n",
			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));

	free(val_copy);
out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}

static int write_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, false);
}

static int append_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, true);
}

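/* Dump a tracefs file (e.g. "trace" or "available_filter_functions") to stdout. */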
static int read_tracing_file_to_stdout(const char *name)
{
	char buf[4096];
	char *file;
	int fd;
	int ret = -1;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, buf, sizeof(buf)));
		goto out;
	}

	/* read contents to stdout */
	while (true) {
		int n = read(fd, buf, sizeof(buf));
		if (n == 0)
			break;
		else if (n < 0)
			goto out_close;

		if (fwrite(buf, n, 1, stdout) != 1)
			goto out_close;
	}
	ret = 0;

out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}

static int write_tracing_file_int(const char *name, int value)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "%d", value);
	if (write_tracing_file(name, buf) < 0)
		return -1;

	return 0;
}

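/* Tracer options live in tracefs under "options/"; build the path and write the value. */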
static int write_tracing_option_file(const char *name, const char *val)
{
	char *file;
	int ret;

	if (asprintf(&file, "options/%s", name) < 0)
		return -1;

	ret = __write_tracing_file(file, val, false);
	free(file);
	return ret;
}

static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);

static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
{
	write_tracing_option_file("function-fork", "0");
	write_tracing_option_file("func_stack_trace", "0");
	write_tracing_option_file("sleep-time", "1");
	write_tracing_option_file("funcgraph-irqs", "1");
}

static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
{
	if (write_tracing_file("tracing_on", "0") < 0)
		return -1;

	if (write_tracing_file("current_tracer", "nop") < 0)
		return -1;

	if (write_tracing_file("set_ftrace_pid", " ") < 0)
		return -1;

	if (reset_tracing_cpu() < 0)
		return -1;

	if (write_tracing_file("max_graph_depth", "0") < 0)
		return -1;

	reset_tracing_filters();
	reset_tracing_options(ftrace);
	return 0;
}

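/*
 * Limit tracing to the target threads by appending each tid to
 * set_ftrace_pid.  Skipped when tracing by CPU rather than by task.
 */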
static int set_tracing_pid(struct perf_ftrace *ftrace)
{
	int i;
	char buf[16];

	if (target__has_cpu(&ftrace->target))
		return 0;

	for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
		scnprintf(buf, sizeof(buf), "%d",
			  ftrace->evlist->core.threads->map[i]);
		if (append_tracing_file("set_ftrace_pid", buf) < 0)
			return -1;
	}
	return 0;
}

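/*
 * tracing_cpumask takes a hex mask: one character covers four CPUs and a
 * ',' separator is inserted every 32 CPUs, which is what the buffer size
 * estimate below accounts for.
 */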
static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
{
	char *cpumask;
	size_t mask_size;
	int ret;
	int last_cpu;

	last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
	mask_size += last_cpu / 32; /* a ',' is needed for every 32 cpus */

	cpumask = malloc(mask_size);
	if (cpumask == NULL) {
		pr_debug("failed to allocate cpu mask\n");
		return -1;
	}

	cpu_map__snprint_mask(cpumap, cpumask, mask_size);

	ret = write_tracing_file("tracing_cpumask", cpumask);

	free(cpumask);
	return ret;
}

static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
	struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;

	if (!target__has_cpu(&ftrace->target))
		return 0;

	return set_tracing_cpumask(cpumap);
}

static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
{
	if (!ftrace->func_stack_trace)
		return 0;

	if (write_tracing_option_file("func_stack_trace", "1") < 0)
		return -1;

	return 0;
}

static int reset_tracing_cpu(void)
{
	struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
	int ret;

	ret = set_tracing_cpumask(cpumap);
	perf_cpu_map__put(cpumap);
	return ret;
}

static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
	struct filter_entry *pos;

	list_for_each_entry(pos, funcs, list) {
		if (append_tracing_file(filter_file, pos->name) < 0)
			return -1;
	}

	return 0;
}

static int set_tracing_filters(struct perf_ftrace *ftrace)
{
	int ret;

	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
	if (ret < 0)
		return ret;

	/* old kernels do not have this filter */
	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);

	return ret;
}

static void reset_tracing_filters(void)
{
	write_tracing_file("set_ftrace_filter", " ");
	write_tracing_file("set_ftrace_notrace", " ");
	write_tracing_file("set_graph_function", " ");
	write_tracing_file("set_graph_notrace", " ");
}

static int set_tracing_depth(struct perf_ftrace *ftrace)
{
	if (ftrace->graph_depth == 0)
		return 0;

	if (ftrace->graph_depth < 0) {
		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
		return -1;
	}

	if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
		return -1;

	return 0;
}

static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
{
	int ret;

	if (ftrace->percpu_buffer_size == 0)
		return 0;

	ret = write_tracing_file_int("buffer_size_kb",
				     ftrace->percpu_buffer_size / 1024);
	if (ret < 0)
		return ret;

	return 0;
}

static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
{
	if (!ftrace->inherit)
		return 0;

	if (write_tracing_option_file("function-fork", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_nosleep_time)
		return 0;

	if (write_tracing_option_file("sleep-time", "0") < 0)
		return -1;

	return 0;
}

static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_noirqs)
		return 0;

	if (write_tracing_option_file("funcgraph-irqs", "0") < 0)
		return -1;

	return 0;
}

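/*
 * Main run path: check privileges, configure the tracer through tracefs,
 * kick off the workload (if any) and stream trace_pipe to stdout until the
 * workload exits or the user interrupts, then restore the ftrace state.
 */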
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	if (!(perf_cap__capable(CAP_PERFMON) ||
	      perf_cap__capable(CAP_SYS_ADMIN))) {
		pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
		"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
		"root"
#endif
		);
		return -1;
	}

	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);

	if (ftrace->list_avail_functions)
		return read_tracing_file_to_stdout("available_filter_functions");

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out;

	if (argc && perf_evlist__prepare_workload(ftrace->evlist,
				&ftrace->target, argv, false,
				ftrace__workload_exec_failed_signal) < 0) {
		goto out;
	}

	if (set_tracing_pid(ftrace) < 0) {
		pr_err("failed to set ftrace pid\n");
		goto out_reset;
	}

	if (set_tracing_cpu(ftrace) < 0) {
		pr_err("failed to set tracing cpumask\n");
		goto out_reset;
	}

	if (set_tracing_func_stack_trace(ftrace) < 0) {
		pr_err("failed to set tracing option func_stack_trace\n");
		goto out_reset;
	}

	if (set_tracing_filters(ftrace) < 0) {
		pr_err("failed to set tracing filters\n");
		goto out_reset;
	}

	if (set_tracing_depth(ftrace) < 0) {
		pr_err("failed to set graph depth\n");
		goto out_reset;
	}

	if (set_tracing_percpu_buffer_size(ftrace) < 0) {
		pr_err("failed to set tracing per-cpu buffer size\n");
		goto out_reset;
	}

	if (set_tracing_trace_inherit(ftrace) < 0) {
		pr_err("failed to set tracing option function-fork\n");
		goto out_reset;
	}

	if (set_tracing_sleep_time(ftrace) < 0) {
		pr_err("failed to set tracing option sleep-time\n");
		goto out_reset;
	}

	if (set_tracing_funcgraph_irqs(ftrace) < 0) {
		pr_err("failed to set tracing option funcgraph-irqs\n");
		goto out_reset;
	}

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	/* display column headers */
	read_tracing_file_to_stdout("trace");

	if (write_tracing_file("tracing_on", "1") < 0) {
		pr_err("can't enable tracing\n");
		goto out_close_fd;
	}

	perf_evlist__start_workload(ftrace->evlist);

	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));
			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
		}
	}

	write_tracing_file("tracing_on", "0");

	if (workload_exec_errno) {
		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		/* flush stdout first so the error msg below appears at the end. */
		fflush(stdout);
		pr_err("workload failed: %s\n", emsg);
		goto out_close_fd;
	}

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	reset_tracing_files(ftrace);
out:
	return (done && !workload_exec_errno) ? 0 : -1;
}

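/*
 * perfconfig handler: only "ftrace.tracer" is recognized, and its value
 * must be either "function" or "function_graph".
 */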
static int perf_ftrace_config(const char *var, const char *value, void *cb)
{
	struct perf_ftrace *ftrace = cb;

	if (!strstarts(var, "ftrace."))
		return 0;

	if (strcmp(var, "ftrace.tracer"))
		return -1;

	if (!strcmp(value, "function_graph") ||
	    !strcmp(value, "function")) {
		ftrace->tracer = value;
		return 0;
	}

	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
	return -1;
}

static int parse_filter_func(const struct option *opt, const char *str,
			     int unset __maybe_unused)
{
	struct list_head *head = opt->value;
	struct filter_entry *entry;

	entry = malloc(sizeof(*entry) + strlen(str) + 1);
	if (entry == NULL)
		return -ENOMEM;

	strcpy(entry->name, str);
	list_add_tail(&entry->list, head);

	return 0;
}

static void delete_filter_func(struct list_head *head)
{
	struct filter_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del_init(&pos->list);
		free(pos);
	}
}

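/*
 * Parse -m/--buffer-size with an optional B/K/M/G suffix.  The value is
 * kept in bytes here and converted to KB when written to buffer_size_kb.
 */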
static int parse_buffer_size(const struct option *opt,
			     const char *str, int unset)
{
	unsigned long *s = (unsigned long *)opt->value;
	static struct parse_tag tags_size[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};
	unsigned long val;

	if (unset) {
		*s = 0;
		return 0;
	}

	val = parse_tag_value(str, tags_size);
	if (val != (unsigned long) -1) {
		if (val < 1024) {
			pr_err("buffer size too small, must be larger than 1KB.");
			return -1;
		}
		*s = val;
		return 0;
	}

	return -1;
}

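/*
 * --func-opts and --graph-opts take a comma-separated list handled by the
 * sublevel-options parser; each recognized name sets the matching flag in
 * struct perf_ftrace.
 */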
static int parse_func_tracer_opts(const struct option *opt,
				  const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
	struct sublevel_option func_tracer_opts[] = {
		{ .name = "call-graph",	.value_ptr = &ftrace->func_stack_trace },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, func_tracer_opts);
	if (ret)
		return ret;

	return 0;
}

static int parse_graph_tracer_opts(const struct option *opt,
				   const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
	struct sublevel_option graph_tracer_opts[] = {
		{ .name = "nosleep-time",	.value_ptr = &ftrace->graph_nosleep_time },
		{ .name = "noirqs",		.value_ptr = &ftrace->graph_noirqs },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, graph_tracer_opts);
	if (ret)
		return ret;

	return 0;
}

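/*
 * -G/-g filters imply the function_graph tracer and -T/-N filters imply the
 * function tracer; function_graph wins when both kinds are given.
 */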
static void select_tracer(struct perf_ftrace *ftrace)
{
	bool graph = !list_empty(&ftrace->graph_funcs) ||
		     !list_empty(&ftrace->nograph_funcs);
	bool func = !list_empty(&ftrace->filters) ||
		    !list_empty(&ftrace->notrace);

	/* The function_graph tracer has priority over the function tracer. */
	if (graph)
		ftrace->tracer = "function_graph";
	else if (func)
		ftrace->tracer = "function";
	/* Otherwise, the default tracer is used. */

	pr_debug("%s tracer is used\n", ftrace->tracer);
}

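/*
 * Example invocations (assuming an ftrace-enabled kernel and sufficient
 * privileges), matching the options defined below:
 *
 *   perf ftrace -- sleep 1              # function_graph trace of "sleep 1"
 *   perf ftrace -t function -p 1234     # function trace of an existing pid
 *   perf ftrace -G 'vfs_*' -- ls        # graph-trace only vfs_* functions
 */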
int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },
	};
	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- <command> [<options>]",
		NULL
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "tracer to use: function_graph (default) or function"),
	OPT_BOOLEAN('F', "funcs", &ftrace.list_avail_functions,
		    "Show available functions to filter"),
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "trace on existing process id"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "trace given functions using function tracer",
		     parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "do not trace given functions", parse_filter_func),
	OPT_CALLBACK(0, "func-opts", &ftrace, "options",
		     "function tracer options, available options: call-graph",
		     parse_func_tracer_opts),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "trace given functions using function_graph tracer",
		     parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_INTEGER('D', "graph-depth", &ftrace.graph_depth,
		    "Max depth for function graph tracer"),
	OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
		     "graph tracer options, available options: nosleep-time,noirqs",
		     parse_graph_tracer_opts),
	OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
		     "size of per cpu buffer", parse_buffer_size),
	OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
		    "trace child processes"),
	OPT_END()
	};

	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;

	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&ftrace.target))
		ftrace.target.system_wide = true;

	select_tracer(&ftrace);

	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, 512);
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}

	ftrace.evlist = evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}

	ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;

	ret = __cmd_ftrace(&ftrace, argc, argv);

out_delete_evlist:
	evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}