// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *		 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 */
#include "builtin.h"

#include "perf.h"

#include "util/annotate.h"
#include "util/bpf-event.h"
#include "util/config.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/event.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/sort.h"
#include "util/string2.h"
#include "util/term.h"
#include "util/intlist.h"
#include "util/parse-branch-options.h"
#include "arch/common.h"

#include "util/debug.h"
#include "util/ordered-events.h"

#include <assert.h>
#include <elf.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <signal.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/mman.h>

#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/types.h>

#include <linux/ctype.h>

static volatile int done;
static volatile int resize;

#define HEADER_LINE_NR  5

static void perf_top__update_print_entries(struct perf_top *top)
{
	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}

static void winch_sig(int sig __maybe_unused)
{
	resize = 1;
}

static void perf_top__resize(struct perf_top *top)
{
	get_term_dimensions(&top->winsize);
	perf_top__update_print_entries(top);
}

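/*
 * Set up annotation for the histogram entry the user selected: this needs a
 * real DSO (vmlinux or kcore rather than just /proc/kallsyms), allocates the
 * per-symbol sample histograms, disassembles the symbol and, on success,
 * records it as the current symbol filter entry.
 */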
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
	struct evsel *evsel;
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	int err = -1;

	if (!he || !he->ms.sym)
		return -1;

	evsel = hists_to_evsel(he->hists);

	sym = he->ms.sym;
	map = he->ms.map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(map->dso)) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
		return -1;
	}

	notes = symbol__annotation(sym);
	pthread_mutex_lock(&notes->lock);

	if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return err;
	}

	err = symbol__annotate(sym, map, evsel, 0, &top->annotation_opts, NULL);
	if (err == 0) {
		top->sym_filter_entry = he;
	} else {
		char msg[BUFSIZ];
		symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
	}

	pthread_mutex_unlock(&notes->lock);
	return err;
}

static void __zero_source_counters(struct hist_entry *he)
{
	struct symbol *sym = he->ms.sym;
	symbol__annotate_zero_histograms(sym);
}

static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
	struct utsname uts;
	int err = uname(&uts);

	ui__warning("Out of bounds address found:\n\n"
		    "Addr: %" PRIx64 "\n"
		    "DSO: %s %c\n"
		    "Map: %" PRIx64 "-%" PRIx64 "\n"
		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
		    "Arch: %s\n"
		    "Kernel: %s\n"
		    "Tools: %s\n\n"
		    "Not all samples will be on the annotation output.\n\n"
		    "Please report to linux-kernel@vger.kernel.org\n",
		    ip, map->dso->long_name, dso__symtab_origin(map->dso),
		    map->start, map->end, sym->start, sym->end,
		    sym->binding == STB_GLOBAL ? 'g' :
		    sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
		    err ? "[unknown]" : uts.machine,
		    err ? "[unknown]" : uts.release, perf_version_string);
	if (use_browser <= 0)
		sleep(5);

	map->erange_warned = true;
}

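/*
 * Account one sample address into the selected symbol's annotation
 * histogram. The annotation lock is only trylock'ed so the sampling path
 * never stalls on the display thread; on error the caller's he->hists->lock
 * is dropped while warning (and possibly sleeping) and re-taken before
 * returning.
 */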
static void perf_top__record_precise_ip(struct perf_top *top,
					struct hist_entry *he,
					struct perf_sample *sample,
					struct evsel *evsel, u64 ip)
{
	struct annotation *notes;
	struct symbol *sym = he->ms.sym;
	int err = 0;

	if (sym == NULL || (use_browser == 0 &&
			    (top->sym_filter_entry == NULL ||
			     top->sym_filter_entry->ms.sym != sym)))
		return;

	notes = symbol__annotation(sym);

	if (pthread_mutex_trylock(&notes->lock))
		return;

	err = hist_entry__inc_addr_samples(he, sample, evsel, ip);

	pthread_mutex_unlock(&notes->lock);

	if (unlikely(err)) {
		/*
		 * This function is now called with he->hists->lock held.
		 * Release it before going to sleep.
		 */
		pthread_mutex_unlock(&he->hists->lock);

		if (err == -ERANGE && !he->ms.map->erange_warned)
			ui__warn_map_erange(he->ms.map, sym, ip);
		else if (err == -ENOMEM) {
			pr_err("Not enough memory for annotating '%s' symbol!\n",
			       sym->name);
			sleep(1);
		}

		pthread_mutex_lock(&he->hists->lock);
	}
}

static void perf_top__show_details(struct perf_top *top)
{
	struct hist_entry *he = top->sym_filter_entry;
	struct evsel *evsel;
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!he)
		return;

	evsel = hists_to_evsel(he->hists);

	symbol = he->ms.sym;
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	symbol__calc_percent(symbol, evsel);

	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
	printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);

	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, &top->annotation_opts);

	if (top->evlist->enabled) {
		if (top->zero)
			symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
		else
			symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
	}
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	pthread_mutex_unlock(&notes->lock);
}

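/*
 * Refresh the histograms for every event: decay (or zero, with 'z') the
 * existing entries, collapse and resort them and, when event groups are
 * being displayed, match/link member hists into their leader's hists.
 */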
static void perf_top__resort_hists(struct perf_top *t)
{
	struct evlist *evlist = t->evlist;
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		if (evlist->enabled) {
			if (t->zero) {
				hists__delete_entries(hists);
			} else {
				hists__decay_entries(hists, t->hide_user_symbols,
						     t->hide_kernel_symbols);
			}
		}

		hists__collapse_resort(hists, NULL);

		/* Non-group events are considered as leader */
		if (symbol_conf.event_group &&
		    !perf_evsel__is_group_leader(pos)) {
			struct hists *leader_hists = evsel__hists(pos->leader);

			hists__match(leader_hists, hists);
			hists__link(leader_hists, hists);
		}
	}

	evlist__for_each_entry(evlist, pos) {
		perf_evsel__output_resort(pos, NULL);
	}
}

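/*
 * One refresh of the --stdio interface: clear the screen, print the header,
 * warn (in red) about lost chunks when not in overwrite mode, then show
 * either the annotated details of the filtered symbol or the resorted
 * symbol table.
 */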
static void perf_top__print_sym_table(struct perf_top *top)
{
	char bf[160];
	int printed = 0;
	const int win_width = top->winsize.ws_col - 1;
	struct evsel *evsel = top->sym_evsel;
	struct hists *hists = evsel__hists(evsel);

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(top, bf, sizeof(bf));
	printf("%s\n", bf);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (!top->record_opts.overwrite &&
	    (hists->stats.nr_lost_warned !=
	    hists->stats.nr_events[PERF_RECORD_LOST])) {
		hists->stats.nr_lost_warned =
			      hists->stats.nr_events[PERF_RECORD_LOST];
		color_fprintf(stdout, PERF_COLOR_RED,
			      "WARNING: LOST %d chunks, Check IO/CPU overload",
			      hists->stats.nr_lost_warned);
		++printed;
	}

	if (top->sym_filter_entry) {
		perf_top__show_details(top);
		return;
	}

	perf_top__resort_hists(top);

	hists__output_recalc_col_len(hists, top->print_entries - printed);
	putchar('\n');
	hists__fprintf(hists, false, top->print_entries - printed, win_width,
		       top->min_percent, stdout, !symbol_conf.use_callchain);
}

static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while (*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
	char *buf = malloc(0), *p;
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
	struct hists *hists = evsel__hists(top->sym_evsel);
	struct rb_node *next;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		top->sym_filter_entry = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	next = rb_first_cached(&hists->entries);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
			found = n;
			break;
		}
		next = rb_next(&n->rb_node);
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
	} else
		perf_top__parse_source(top, found);

out_free:
	free(buf);
}

static void perf_top__print_mapped_keys(struct perf_top *top)
{
	char *name = NULL;

	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);

	if (top->evlist->core.nr_entries > 1)
		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel));

	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);

	fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
	fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name ?: "NULL");
	fprintf(stdout, "\t[S] stop annotation.\n");

	fprintf(stdout,
		"\t[K] hide kernel symbols. \t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U] hide user symbols. \t(%s)\n",
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
	fprintf(stdout, "\t[qQ] quit.\n");
}

static int perf_top__key_mapped(struct perf_top *top, int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
		case 'K':
		case 'U':
		case 'F':
		case 's':
		case 'S':
			return 1;
		case 'E':
			return top->evlist->core.nr_entries > 1 ? 1 : 0;
		default:
			break;
	}

	return 0;
}

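/*
 * Handle one interactive command for the --stdio interface. Unmapped keys
 * bring up the help menu and a second prompt; returns false only when the
 * user asked to quit.
 */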
static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
	bool ret = true;

	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios save;

		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		set_term_quiet_input(&save);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
			return ret;
	}

	switch (c) {
		case 'd':
			prompt_integer(&top->delay_secs, "Enter display delay");
			if (top->delay_secs < 1)
				top->delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&top->print_entries, "Enter display entries (lines)");
			if (top->print_entries == 0) {
				perf_top__resize(top);
				signal(SIGWINCH, winch_sig);
			} else {
				signal(SIGWINCH, SIG_DFL);
			}
			break;
		case 'E':
			if (top->evlist->core.nr_entries > 1) {
				/* Select 0 as the default event: */
				int counter = 0;

				fprintf(stderr, "\nAvailable events:");

				evlist__for_each_entry(top->evlist, top->sym_evsel)
					fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));

				prompt_integer(&counter, "Enter details event counter");

				if (counter >= top->evlist->core.nr_entries) {
					top->sym_evsel = perf_evlist__first(top->evlist);
					fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
					sleep(1);
					break;
				}
				evlist__for_each_entry(top->evlist, top->sym_evsel)
					if (top->sym_evsel->idx == counter)
						break;
			} else
				top->sym_evsel = perf_evlist__first(top->evlist);
			break;
		case 'f':
			prompt_integer(&top->count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&top->annotation_opts.min_pcnt,
				       "Enter details display event filter (percent)");
			break;
		case 'K':
			top->hide_kernel_symbols = !top->hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (top->dump_symtab)
				perf_session__fprintf_dsos(top->session, stderr);
			ret = false;
			break;
		case 's':
			perf_top__prompt_symbol(top, "Enter details symbol");
			break;
		case 'S':
			if (!top->sym_filter_entry)
				break;
			else {
				struct hist_entry *syme = top->sym_filter_entry;

				top->sym_filter_entry = NULL;
				__zero_source_counters(syme);
			}
			break;
		case 'U':
			top->hide_user_symbols = !top->hide_user_symbols;
			break;
		case 'z':
			top->zero = !top->zero;
			break;
		default:
			break;
	}

	return ret;
}

static void perf_top__sort_new_samples(void *arg)
{
	struct perf_top *t = arg;

	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;

	perf_top__resort_hists(t);

	if (t->lost || t->drop)
		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C))\n");
}

static void stop_top(void)
{
	session_done = 1;
	done = 1;
}

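/*
 * Display thread used when the TUI browser is active: it hands refresh duty
 * to the hist browser via a hist_browser_timer and only returns (stopping
 * perf top) when the browser exits.
 */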
static void *display_thread_tui(void *arg)
{
	struct evsel *pos;
	struct perf_top *top = arg;
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
	struct hist_browser_timer hbt = {
		.timer		= perf_top__sort_new_samples,
		.arg		= top,
		.refresh	= top->delay_secs,
	};

	/*
	 * In order to read symbols from other namespaces perf needs to call
	 * setns(2). This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);

	perf_top__sort_new_samples(top);

	/*
	 * Initialize the uid_filter_str. In the future the TUI will allow
	 * zooming in/out on UIDs. For now just use whatever the user passed
	 * via --uid.
	 */
	evlist__for_each_entry(top->evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		hists->uid_filter_str = top->record_opts.target.uid_str;
	}

	perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
				      top->min_percent,
				      &top->session->header.env,
				      !top->record_opts.overwrite,
				      &top->annotation_opts);

	stop_top();
	return NULL;
}

static void display_sig(int sig __maybe_unused)
{
	stop_top();
}

static void display_setup_sig(void)
{
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGINT, display_sig);
	signal(SIGQUIT, display_sig);
	signal(SIGTERM, display_sig);
}

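/*
 * Display thread for --stdio mode: repaint the symbol table every
 * top->delay_secs seconds and poll stdin in between for interactive
 * key presses.
 */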
static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios save;
	struct perf_top *top = arg;
	int delay_msecs, c;

	/*
	 * In order to read symbols from other namespaces perf needs to call
	 * setns(2). This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);

	display_setup_sig();
	pthread__unblock_sigwinch();
repeat:
	delay_msecs = top->delay_secs * MSEC_PER_SEC;
	set_term_quiet_input(&save);
	/* trash return */
	getc(stdin);

	while (!done) {
		perf_top__print_sym_table(top);
		/*
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		 */
		switch (poll(&stdin_poll, 1, delay_msecs)) {
		case 0:
			continue;
		case -1:
			if (errno == EINTR)
				continue;
			__fallthrough;
		default:
			c = getc(stdin);
			tcsetattr(0, TCSAFLUSH, &save);

			if (perf_top__handle_keypress(top, c))
				goto repeat;
			stop_top();
		}
	}

	tcsetattr(0, TCSAFLUSH, &save);
	return NULL;
}

static int hist_iter__top_callback(struct hist_entry_iter *iter,
				   struct addr_location *al, bool single,
				   void *arg)
{
	struct perf_top *top = arg;
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;

	if (perf_hpp_list.sym && single)
		perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);

	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
			     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
	return 0;
}

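/*
 * Resolve one sample to a thread/map/symbol and feed it into the histogram
 * code. Also the place where user-facing warnings are issued: unresolvable
 * guest kernels, kptr_restrict hiding kernel addresses, and a missing or
 * unusable vmlinux.
 */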
static void perf_event__process_sample(struct perf_tool *tool,
				       const union perf_event *event,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_top *top = container_of(tool, struct perf_top, tool);
	struct addr_location al;
	int err;

	if (!machine && perf_guest) {
		static struct intlist *seen;

		if (!seen)
			seen = intlist__new(NULL);

		if (!intlist__has_entry(seen, sample->pid)) {
			pr_err("Can't find guest [%d]'s kernel information\n",
				sample->pid);
			intlist__add(seen, sample->pid);
		}
		return;
	}

	if (!machine) {
		pr_err("%u unprocessable samples recorded.\r",
		       top->session->evlist->stats.nr_unprocessable_samples++);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top->exact_samples++;

	if (machine__resolve(machine, &al, sample) < 0)
		return;

	if (!machine->kptr_restrict_warned &&
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
		if (!perf_evlist__exclude_kernel(top->session->evlist)) {
			ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Kernel%s samples will not be resolved.\n",
			  al.map && map__has_symbols(al.map) ?
			  " modules" : "");
			if (use_browser <= 0)
				sleep(5);
		}
		machine->kptr_restrict_warned = true;
	}

	if (al.sym == NULL && al.map != NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
		    __map__is_kernel(al.map) && map__has_symbols(al.map)) {
			if (symbol_conf.vmlinux_name) {
				char serr[256];
				dso__strerror_load(al.map->dso, serr, sizeof(serr));
				ui__warning("The %s file can't be used: %s\n%s",
					    symbol_conf.vmlinux_name, serr, msg);
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
			top->vmlinux_warned = true;
		}
	}

	if (al.sym == NULL || !al.sym->idle) {
		struct hists *hists = evsel__hists(evsel);
		struct hist_entry_iter iter = {
			.evsel		= evsel,
			.sample		= sample,
			.add_entry_cb	= hist_iter__top_callback,
		};

		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		pthread_mutex_lock(&hists->lock);

		err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
		if (err < 0)
			pr_err("Problem incrementing symbol period, skipping event\n");

		pthread_mutex_unlock(&hists->lock);
	}

	addr_location__put(&al);
}

static void
perf_top__process_lost(struct perf_top *top, union perf_event *event,
		       struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	top->lost += event->lost.lost;
	top->lost_total += event->lost.lost;
	hists->stats.total_lost += event->lost.lost;
}

static void
perf_top__process_lost_samples(struct perf_top *top,
			       union perf_event *event,
			       struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	top->lost += event->lost_samples.lost;
	top->lost_total += event->lost_samples.lost;
	hists->stats.total_lost_samples += event->lost_samples.lost;
}

static u64 last_timestamp;

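/*
 * Drain one mmap ring: timestamp each event and queue it on the "in"
 * ordered-events queue for the processing thread; when the processing
 * thread has requested a queue rotation (qe.rotate), acknowledge it here so
 * it can start flushing the retired queue.
 */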
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct perf_mmap *md;
	union perf_event *event;

	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
	if (perf_mmap__read_init(md) < 0)
		return;

	while ((event = perf_mmap__read_event(md)) != NULL) {
		int ret;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
		if (ret && ret != -1)
			break;

		ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0);
		if (ret)
			break;

		perf_mmap__consume(md);

		if (top->qe.rotate) {
			pthread_mutex_lock(&top->qe.mutex);
			top->qe.rotate = false;
			pthread_cond_signal(&top->qe.cond);
			pthread_mutex_unlock(&top->qe.mutex);
		}
	}

	perf_mmap__read_done(md);
}

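/*
 * Read all mmap rings. In overwrite mode the backward rings are switched to
 * DATA_PENDING while they are read and back to EMPTY/RUNNING afterwards.
 */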
static void perf_top__mmap_read(struct perf_top *top)
{
	bool overwrite = top->record_opts.overwrite;
	struct evlist *evlist = top->evlist;
	int i;

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);

	for (i = 0; i < top->evlist->nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);

	if (overwrite) {
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
	}
}

/*
 * Check the per-event overwrite term.
 * perf top should use a consistent term for all events.
 * - No event has a per-event term
 *   E.g. "cpu/cpu-cycles/,cpu/instructions/"
 *   Nothing changes, return 0.
 * - All events have the same per-event term
 *   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
 *   Use the per-event setting to replace opts->overwrite if they
 *   differ, then return 0.
 * - Events have different per-event terms
 *   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
 *   Return -1
 * - Some of the events set a per-event term, but some do not.
 *   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
 *   Return -1
 */
static int perf_top__overwrite_check(struct perf_top *top)
{
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct perf_evsel_config_term *term;
	struct list_head *config_terms;
	struct evsel *evsel;
	int set, overwrite = -1;

	evlist__for_each_entry(evlist, evsel) {
		set = -1;
		config_terms = &evsel->config_terms;
		list_for_each_entry(term, config_terms, list) {
			if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
				set = term->val.overwrite ? 1 : 0;
		}

		/* no term for current and previous event (likely) */
		if ((overwrite < 0) && (set < 0))
			continue;

		/* has term for both current and previous event, compare */
		if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
			return -1;

		/* no term for current event but has term for previous one */
		if ((overwrite >= 0) && (set < 0))
			return -1;

		/* has term for current event */
		if ((overwrite < 0) && (set >= 0)) {
			/* if it's first event, set overwrite */
			if (evsel == perf_evlist__first(evlist))
				overwrite = set;
			else
				return -1;
		}
	}

	if ((overwrite >= 0) && (opts->overwrite != overwrite))
		opts->overwrite = overwrite;

	return 0;
}

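/*
 * Fall back from overwrite to non-overwrite mode when the first event fails
 * to open (the caller only does this when the kernel reports write_backward
 * as a missing feature): clear the write_backward attribute on every event
 * and drop opts->overwrite so the retry opens regular forward rings.
 */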
static int perf_top_overwrite_fallback(struct perf_top *top,
				       struct evsel *evsel)
{
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct evsel *counter;

	if (!opts->overwrite)
		return 0;

	/* only fall back when first event fails */
	if (evsel != perf_evlist__first(evlist))
		return 0;

	evlist__for_each_entry(evlist, counter)
		counter->core.attr.write_backward = false;
	opts->overwrite = false;
	pr_debug2("fall back to non-overwrite mode\n");
	return 1;
}

static int perf_top__start_counters(struct perf_top *top)
{
	char msg[BUFSIZ];
	struct evsel *counter;
	struct evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;

	if (perf_top__overwrite_check(top)) {
		ui__error("perf top only supports a consistent per-event "
			  "overwrite setting for all events\n");
		goto out_err;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, counter) {
try_again:
		if (evsel__open(counter, top->evlist->core.cpus,
				top->evlist->core.threads) < 0) {

			/*
			 * Handle the overwrite fallback specially here,
			 * because perf top is the only tool that has
			 * overwrite mode by default, supports both
			 * overwrite and non-overwrite mode, and requires
			 * a consistent mode for all events.
			 *
			 * This may move to generic code once more tools
			 * have a similar attribute.
			 */
			if (perf_missing_features.write_backward &&
			    perf_top_overwrite_fallback(top, counter))
				goto try_again;

			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			perf_evsel__open_strerror(counter, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out_err;
		}
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
		ui__error("Failed to mmap with %d (%s)\n",
			    errno, str_error_r(errno, msg, sizeof(msg)));
		goto out_err;
	}

	return 0;

out_err:
	return -1;
}

static int callchain_param__setup_sample_type(struct callchain_param *callchain)
{
	if (callchain->mode != CHAIN_NONE) {
		if (callchain_register_param(callchain) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	return 0;
}

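/*
 * The two ordered-events queues in top->qe are double buffered: the reader
 * thread fills qe.in while the processing thread flushes the other one.
 * rotate_queues() flips qe.in to the idle queue and returns the one that
 * was just retired.
 */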
static struct ordered_events *rotate_queues(struct perf_top *top)
{
	struct ordered_events *in = top->qe.in;

	if (top->qe.in == &top->qe.data[1])
		top->qe.in = &top->qe.data[0];
	else
		top->qe.in = &top->qe.data[1];

	return in;
}

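/*
 * Processing thread: once the current "in" queue has events, rotate the
 * queues, wait for the reader to acknowledge the rotation (the qe.rotate
 * handshake in perf_top__mmap_read_idx()), then flush the retired queue.
 */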
1072static void *process_thread(void *arg)
1073{
1074 struct perf_top *top = arg;
1075
1076 while (!done) {
1077 struct ordered_events *out, *in = top->qe.in;
1078
1079 if (!in->nr_events) {
1080 usleep(100);
1081 continue;
1082 }
1083
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001084 out = rotate_queues(top);
Jiri Olsa94ad6e72018-11-05 21:23:40 +01001085
1086 pthread_mutex_lock(&top->qe.mutex);
1087 top->qe.rotate = true;
1088 pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
1089 pthread_mutex_unlock(&top->qe.mutex);
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001090
1091 if (ordered_events__flush(out, OE_FLUSH__TOP))
1092 pr_err("failed to process events\n");
1093 }
1094
1095 return NULL;
1096}
1097
Jiri Olsad63b9f62018-11-11 19:52:06 +01001098/*
 1099 * Drop samples that are more than 'top->delay_secs' seconds older than the last timestamp seen.
1100 */
1101static int should_drop(struct ordered_event *qevent, struct perf_top *top)
1102{
1103 union perf_event *event = qevent->event;
1104 u64 delay_timestamp;
1105
1106 if (event->header.type != PERF_RECORD_SAMPLE)
1107 return false;
1108
1109 delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
1110 return delay_timestamp < last_timestamp;
1111}
1112
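/*
 * Delivery callback for the ordered_events queues: drop samples that
 * are too old, parse the sample, account it per cpumode and hand
 * PERF_RECORD_SAMPLE records to perf_event__process_sample(); other
 * record types go to the lost-event accounting or machine__process_event().
 */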
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001113static int deliver_event(struct ordered_events *qe,
1114 struct ordered_event *qevent)
1115{
1116 struct perf_top *top = qe->data;
Jiri Olsa63503db2019-07-21 13:23:52 +02001117 struct evlist *evlist = top->evlist;
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001118 struct perf_session *session = top->session;
1119 union perf_event *event = qevent->event;
1120 struct perf_sample sample;
Jiri Olsa32dcd022019-07-21 13:23:51 +02001121 struct evsel *evsel;
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001122 struct machine *machine;
1123 int ret = -1;
1124
Jiri Olsa97f7e0b2018-11-11 20:02:46 +01001125 if (should_drop(qevent, top)) {
1126 top->drop++;
1127 top->drop_total++;
Jiri Olsad63b9f62018-11-11 19:52:06 +01001128 return 0;
Jiri Olsa97f7e0b2018-11-11 20:02:46 +01001129 }
Jiri Olsad63b9f62018-11-11 19:52:06 +01001130
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001131 ret = perf_evlist__parse_sample(evlist, event, &sample);
1132 if (ret) {
1133 pr_err("Can't parse sample, err = %d\n", ret);
1134 goto next_event;
1135 }
1136
1137 evsel = perf_evlist__id2evsel(session->evlist, sample.id);
1138 assert(evsel != NULL);
1139
Arnaldo Carvalho de Melo2f53ae32019-08-15 16:03:26 -03001140 if (event->header.type == PERF_RECORD_SAMPLE) {
1141 if (evswitch__discard(&top->evswitch, evsel))
1142 return 0;
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001143 ++top->samples;
Arnaldo Carvalho de Melo2f53ae32019-08-15 16:03:26 -03001144 }
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001145
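	/* Account the sample per cpumode and pick the machine it belongs to. */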
1146 switch (sample.cpumode) {
1147 case PERF_RECORD_MISC_USER:
1148 ++top->us_samples;
1149 if (top->hide_user_symbols)
1150 goto next_event;
1151 machine = &session->machines.host;
1152 break;
1153 case PERF_RECORD_MISC_KERNEL:
1154 ++top->kernel_samples;
1155 if (top->hide_kernel_symbols)
1156 goto next_event;
1157 machine = &session->machines.host;
1158 break;
1159 case PERF_RECORD_MISC_GUEST_KERNEL:
1160 ++top->guest_kernel_samples;
1161 machine = perf_session__find_machine(session,
1162 sample.pid);
1163 break;
1164 case PERF_RECORD_MISC_GUEST_USER:
1165 ++top->guest_us_samples;
1166 /*
 1167 * TODO: guest user samples are not processed from the
 1168 * host side beyond simple counting.
1169 */
1170 goto next_event;
1171 default:
1172 if (event->header.type == PERF_RECORD_SAMPLE)
1173 goto next_event;
1174 machine = &session->machines.host;
1175 break;
1176 }
1177
1178 if (event->header.type == PERF_RECORD_SAMPLE) {
1179 perf_event__process_sample(&top->tool, event, evsel,
1180 &sample, machine);
1181 } else if (event->header.type == PERF_RECORD_LOST) {
1182 perf_top__process_lost(top, event, evsel);
1183 } else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
1184 perf_top__process_lost_samples(top, event, evsel);
1185 } else if (event->header.type < PERF_RECORD_MAX) {
1186 hists__inc_nr_events(evsel__hists(evsel), event->header.type);
1187 machine__process_event(machine, event, &sample);
1188 } else
1189 ++session->evlist->stats.nr_unknown_events;
1190
1191 ret = 0;
1192next_event:
1193 return ret;
1194}
1195
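/*
 * Set up the two copy-on-queue ordered_events buffers and the
 * mutex/condition variable used to synchronize queue rotation with
 * process_thread().
 */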
1196static void init_process_thread(struct perf_top *top)
1197{
1198 ordered_events__init(&top->qe.data[0], deliver_event, top);
1199 ordered_events__init(&top->qe.data[1], deliver_event, top);
1200 ordered_events__set_copy_on_queue(&top->qe.data[0], true);
1201 ordered_events__set_copy_on_queue(&top->qe.data[1], true);
1202 top->qe.in = &top->qe.data[0];
Jiri Olsa94ad6e72018-11-05 21:23:40 +01001203 pthread_mutex_init(&top->qe.mutex, NULL);
1204 pthread_cond_init(&top->qe.cond, NULL);
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001205}
1206
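/*
 * Main body of 'perf top': synthesize the pre-existing threads and BPF
 * events, start the counters, mmap the ring buffers, spawn the event
 * processing and display threads, then keep reading events until 'done'.
 */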
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001207static int __cmd_top(struct perf_top *top)
Ingo Molnar716c69f2009-06-07 17:31:52 +02001208{
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001209 struct record_opts *opts = &top->record_opts;
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001210 pthread_t thread, thread_process;
Arnaldo Carvalho de Melo19d4ac32011-10-05 19:30:22 -03001211 int ret;
Jiri Olsaf5fc14122013-10-15 16:27:32 +02001212
Arnaldo Carvalho de Melof178fd22018-05-28 14:24:45 -03001213 if (!top->annotation_opts.objdump_path) {
1214 ret = perf_env__lookup_objdump(&top->session->header.env,
1215 &top->annotation_opts.objdump_path);
Sukadev Bhattiprolu0d3942d2013-05-14 22:56:51 -07001216 if (ret)
Changbin Du0dba9e42019-03-16 16:05:47 +08001217 return ret;
Sukadev Bhattiprolu0d3942d2013-05-14 22:56:51 -07001218 }
1219
Arnaldo Carvalho de Meloe3815262016-04-18 12:30:16 -03001220 ret = callchain_param__setup_sample_type(&callchain_param);
Arnaldo Carvalho de Melo19d4ac32011-10-05 19:30:22 -03001221 if (ret)
Changbin Du0dba9e42019-03-16 16:05:47 +08001222 return ret;
Arnaldo Carvalho de Melo19d4ac32011-10-05 19:30:22 -03001223
Masami Hiramatsu9d8b1722015-12-09 11:11:23 +09001224 if (perf_session__register_idle_thread(top->session) < 0)
Changbin Du0dba9e42019-03-16 16:05:47 +08001225 return ret;
Namhyung Kimc53d1382015-09-30 10:45:26 +09001226
Kan Liang0c6b4992017-09-29 07:47:55 -07001227 if (top->nr_threads_synthesize > 1)
1228 perf_set_multithreaded();
Kan Liang340b47f2017-09-29 07:47:54 -07001229
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001230 init_process_thread(top);
1231
Namhyung Kima0c0a4a2019-05-22 14:32:50 +09001232 if (opts->record_namespaces)
1233 top->tool.namespace_events = true;
1234
Song Liue5416952019-03-11 22:30:41 -07001235 ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
Arnaldo Carvalho de Meloa40b95b2019-01-17 08:15:20 -08001236 &top->session->machines.host,
1237 &top->record_opts);
1238 if (ret < 0)
Arnaldo Carvalho de Melo2d45ef72019-05-20 11:04:08 -03001239 pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");
Arnaldo Carvalho de Meloa40b95b2019-01-17 08:15:20 -08001240
Arnaldo Carvalho de Meloa33fbd52013-11-11 11:36:12 -03001241 machine__synthesize_threads(&top->session->machines.host, &opts->target,
Jiri Olsa03617c22019-07-21 13:24:42 +02001242 top->evlist->core.threads, false,
Kan Liang0c6b4992017-09-29 07:47:55 -07001243 top->nr_threads_synthesize);
Kan Liang340b47f2017-09-29 07:47:54 -07001244
Kan Liang0c6b4992017-09-29 07:47:55 -07001245 if (top->nr_threads_synthesize > 1)
1246 perf_set_singlethreaded();
Kan Liang2e7ea3a2015-09-04 10:45:43 -04001247
Jiri Olsa35a634f2016-05-03 13:54:46 +02001248 if (perf_hpp_list.socket) {
Kan Liang2e7ea3a2015-09-04 10:45:43 -04001249 ret = perf_env__read_cpu_topology_map(&perf_env);
Changbin Du0dba9e42019-03-16 16:05:47 +08001250 if (ret < 0) {
1251 char errbuf[BUFSIZ];
1252 const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
1253
1254 ui__error("Could not read the CPU topology map: %s\n", err);
1255 return ret;
1256 }
Kan Liang2e7ea3a2015-09-04 10:45:43 -04001257 }
1258
Arnaldo Carvalho de Melo11859e82013-01-30 13:25:53 -03001259 ret = perf_top__start_counters(top);
1260 if (ret)
Changbin Du0dba9e42019-03-16 16:05:47 +08001261 return ret;
Arnaldo Carvalho de Melo11859e82013-01-30 13:25:53 -03001262
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001263 top->session->evlist = top->evlist;
Arnaldo Carvalho de Melo7b56cce2012-08-01 19:31:00 -03001264 perf_session__set_id_hdr_size(top->session);
Ingo Molnar07800602009-04-20 15:00:56 +02001265
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001266 /*
1267 * When perf is starting the traced process, all the events (apart from
1268 * group members) have enable_on_exec=1 set, so don't spoil it by
1269 * prematurely enabling them.
1270 *
1271 * XXX 'top' still doesn't start workloads like record, trace, but should,
1272 * so leave the check here.
1273 */
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001274 if (!target__none(&opts->target))
Jiri Olsa1c87f162019-07-21 13:24:08 +02001275 evlist__enable(top->evlist);
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001276
Arnaldo Carvalho de Melo11859e82013-01-30 13:25:53 -03001277 ret = -1;
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001278 if (pthread_create(&thread_process, NULL, process_thread, top)) {
1279 ui__error("Could not create process thread.\n");
Changbin Du0dba9e42019-03-16 16:05:47 +08001280 return ret;
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001281 }
1282
Arnaldo Carvalho de Meloc0443df2011-01-31 18:19:33 -02001283 if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001284 display_thread), top)) {
Namhyung Kim3780f482012-05-29 13:22:57 +09001285 ui__error("Could not create display thread.\n");
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001286 goto out_join_thread;
Ingo Molnar07800602009-04-20 15:00:56 +02001287 }
1288
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001289 if (top->realtime_prio) {
Ingo Molnar07800602009-04-20 15:00:56 +02001290 struct sched_param param;
1291
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001292 param.sched_priority = top->realtime_prio;
Ingo Molnar07800602009-04-20 15:00:56 +02001293 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
Namhyung Kim3780f482012-05-29 13:22:57 +09001294 ui__error("Could not set realtime priority.\n");
Jiri Olsaae256fa2014-08-11 10:50:05 +02001295 goto out_join;
Ingo Molnar07800602009-04-20 15:00:56 +02001296 }
1297 }
1298
David Millerff27a062018-10-30 22:30:03 -07001299 /* Wait for a minimal set of events before starting the snapshot */
1300 perf_evlist__poll(top->evlist, 100);
1301
1302 perf_top__mmap_read(top);
1303
Arnaldo Carvalho de Melo11859e82013-01-30 13:25:53 -03001304 while (!done) {
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001305 u64 hits = top->samples;
Ingo Molnar07800602009-04-20 15:00:56 +02001306
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001307 perf_top__mmap_read(top);
Ingo Molnar07800602009-04-20 15:00:56 +02001308
Kan Liangebebbf02018-01-18 13:26:31 -08001309 if (opts->overwrite || (hits == top->samples))
Arnaldo Carvalho de Melof66a889d2014-08-18 17:25:59 -03001310 ret = perf_evlist__poll(top->evlist, 100);
Jiri Olsab135e5e2017-11-14 10:23:39 +01001311
1312 if (resize) {
1313 perf_top__resize(top);
1314 resize = 0;
1315 }
Ingo Molnar07800602009-04-20 15:00:56 +02001316 }
1317
Arnaldo Carvalho de Melo11859e82013-01-30 13:25:53 -03001318 ret = 0;
Jiri Olsaae256fa2014-08-11 10:50:05 +02001319out_join:
1320 pthread_join(thread, NULL);
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001321out_join_thread:
Jiri Olsa94ad6e72018-11-05 21:23:40 +01001322 pthread_cond_signal(&top->qe.cond);
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001323 pthread_join(thread_process, NULL);
Arnaldo Carvalho de Melo11859e82013-01-30 13:25:53 -03001324 return ret;
Arnaldo Carvalho de Melo19d4ac32011-10-05 19:30:22 -03001325}
1326
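/*
 * '-g' handler: enabling call-graph display implies using callchains,
 * the rest is handled by the generic record_callchain_opt().
 */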
1327static int
Jiri Olsaae779a62013-10-26 16:25:34 +02001328callchain_opt(const struct option *opt, const char *arg, int unset)
1329{
1330 symbol_conf.use_callchain = true;
1331 return record_callchain_opt(opt, arg, unset);
1332}
1333
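/*
 * '--call-graph' handler: default to frame-pointer based callchain
 * recording, disable callchains entirely on --no-call-graph, otherwise
 * parse the detailed callchain options.
 */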
1334static int
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001335parse_callchain_opt(const struct option *opt, const char *arg, int unset)
Arnaldo Carvalho de Melo19d4ac32011-10-05 19:30:22 -03001336{
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001337 struct callchain_param *callchain = opt->value;
Namhyung Kima2c10d32015-10-22 15:28:49 +09001338
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001339 callchain->enabled = !unset;
1340 callchain->record_mode = CALLCHAIN_FP;
Namhyung Kima2c10d32015-10-22 15:28:49 +09001341
1342 /*
1343 * --no-call-graph
1344 */
1345 if (unset) {
1346 symbol_conf.use_callchain = false;
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001347 callchain->record_mode = CALLCHAIN_NONE;
Namhyung Kima2c10d32015-10-22 15:28:49 +09001348 return 0;
1349 }
1350
1351 return parse_callchain_top_opt(arg);
Ingo Molnar07800602009-04-20 15:00:56 +02001352}
Ingo Molnarb456bae2009-05-26 09:17:18 +02001353
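/*
 * 'perf top' specific config variables: 'top.call-graph' maps to the
 * generic 'call-graph.record-mode', 'top.children' toggles callchain
 * accumulation.
 */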
Wang Nanb8cbb342016-02-26 09:31:51 +00001354static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
Jiri Olsaeb853e82014-02-03 12:44:42 +01001355{
Yisheng Xiea3a4a3b2018-03-12 19:25:56 +08001356 if (!strcmp(var, "top.call-graph")) {
1357 var = "call-graph.record-mode";
1358 return perf_default_config(var, value, cb);
1359 }
Namhyung Kim104ac992013-01-22 18:09:46 +09001360 if (!strcmp(var, "top.children")) {
1361 symbol_conf.cumulate_callchain = perf_config_bool(var, value);
1362 return 0;
1363 }
Jiri Olsaeb853e82014-02-03 12:44:42 +01001364
Wang Nanb8cbb342016-02-26 09:31:51 +00001365 return 0;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001366}
1367
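/* '--percent-limit' handler: hide entries below the given percentage. */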
Namhyung Kimfa5df942013-05-14 11:09:05 +09001368static int
1369parse_percent_limit(const struct option *opt, const char *arg,
1370 int unset __maybe_unused)
1371{
1372 struct perf_top *top = opt->value;
1373
1374 top->min_percent = strtof(arg, NULL);
1375 return 0;
1376}
1377
Namhyung Kim76a26542015-10-22 23:28:32 +09001378const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
1379 "\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
Namhyung Kima2c10d32015-10-22 15:28:49 +09001380
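/*
 * Entry point for 'perf top': set up the defaults, parse the command
 * line, create the evlist and session, then hand control to __cmd_top().
 */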
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03001381int cmd_top(int argc, const char **argv)
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001382{
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001383 char errbuf[BUFSIZ];
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001384 struct perf_top top = {
1385 .count_filter = 5,
1386 .delay_secs = 2,
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001387 .record_opts = {
1388 .mmap_pages = UINT_MAX,
1389 .user_freq = UINT_MAX,
1390 .user_interval = ULLONG_MAX,
1391 .freq = 4000, /* 4 KHz */
Waiman Long5dbb6e82013-10-18 10:38:49 -04001392 .target = {
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001393 .uses_mmap = true,
1394 },
Arnaldo Carvalho de Melo218d6112018-10-29 09:47:00 -03001395 /*
1396 * FIXME: This will lose PERF_RECORD_MMAP and other metadata
1397 * when we pause, fix that and reenable. Probably using a
1398 * separate evlist with a dummy event, i.e. a non-overwrite
1399 * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
1400 * stays in overwrite mode. -acme
 1401		 */
1402 .overwrite = 0,
Jiri Olsa16c66bc2018-11-05 13:24:55 +01001403 .sample_time = true,
Jiri Olsa1e6db2e2019-04-15 14:53:33 +02001404 .sample_time_set = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001405 },
Arnaldo Carvalho de Melo029c75e2018-05-17 16:31:32 -03001406 .max_stack = sysctl__max_stack(),
Arnaldo Carvalho de Melo982d4102018-05-25 17:28:37 -03001407 .annotation_opts = annotation__default_options,
Kan Liang0c6b4992017-09-29 07:47:55 -07001408 .nr_threads_synthesize = UINT_MAX,
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001409 };
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001410 struct record_opts *opts = &top.record_opts;
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001411 struct target *target = &opts->target;
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001412 const struct option options[] = {
Arnaldo Carvalho de Melo8c3e10e2011-01-31 14:50:39 -02001413 OPT_CALLBACK('e', "event", &top.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001414 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001415 parse_events_option),
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001416 OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
1417 OPT_STRING('p', "pid", &target->pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001418 "profile events on existing process id"),
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001419 OPT_STRING('t', "tid", &target->tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001420 "profile events on existing thread id"),
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001421 OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
Ingo Molnarb456bae2009-05-26 09:17:18 +02001422 "system-wide collection from all CPUs"),
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001423 OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001424 "list of cpus to monitor"),
Arnaldo Carvalho de Melob32d1332009-11-24 12:05:15 -02001425 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1426 "file", "vmlinux pathname"),
Willy Tarreaufc2be692013-09-14 10:32:59 +02001427 OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
1428 "don't load vmlinux even if found"),
Arnaldo Carvalho de Melo1b3aae92018-11-27 10:31:03 -03001429 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1430 "file", "kallsyms pathname"),
Arnaldo Carvalho de Melo8c3e10e2011-01-31 14:50:39 -02001431 OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
Arnaldo Carvalho de Melo8ffcda12009-11-16 21:45:24 -02001432 "hide kernel symbols"),
Jiri Olsa994a1f72013-09-01 12:36:12 +02001433 OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
1434 "number of mmap data pages",
1435 perf_evlist__parse_mmap_pages),
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001436 OPT_INTEGER('r', "realtime", &top.realtime_prio,
Ingo Molnarb456bae2009-05-26 09:17:18 +02001437 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo8c3e10e2011-01-31 14:50:39 -02001438 OPT_INTEGER('d', "delay", &top.delay_secs,
Ingo Molnarb456bae2009-05-26 09:17:18 +02001439 "number of seconds to delay between refreshes"),
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001440 OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
Ingo Molnarb456bae2009-05-26 09:17:18 +02001441 "dump the symbol table used for profiling"),
Arnaldo Carvalho de Melo8c3e10e2011-01-31 14:50:39 -02001442 OPT_INTEGER('f', "count-filter", &top.count_filter,
Ingo Molnarb456bae2009-05-26 09:17:18 +02001443 "only display functions with more events than this"),
David Ahernbf806692013-11-14 20:51:30 -07001444 OPT_BOOLEAN(0, "group", &opts->group,
Ingo Molnarb456bae2009-05-26 09:17:18 +02001445 "put the counters into a counter group"),
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001446 OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
1447 "child tasks do not inherit counters"),
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001448 OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
Kirill Smelkov6cff0e82010-02-03 16:52:08 -02001449 "symbol to annotate"),
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001450 OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
Arnaldo Carvalho de Melo7831bf22018-03-01 14:25:56 -03001451 OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
1452 "profile at this frequency",
1453 record__parse_freq),
Arnaldo Carvalho de Melo8c3e10e2011-01-31 14:50:39 -02001454 OPT_INTEGER('E', "entries", &top.print_entries,
Ingo Molnar6e53cdf2009-06-04 08:53:05 +02001455 "display this many functions"),
Arnaldo Carvalho de Melo8c3e10e2011-01-31 14:50:39 -02001456 OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
Arnaldo Carvalho de Melo8ffcda12009-11-16 21:45:24 -02001457 "hide user symbols"),
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001458 OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
1459 OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
Ian Munsiec0555642010-04-13 18:37:33 +10001460 OPT_INCR('v', "verbose", &verbose,
Ingo Molnar3da297a2009-06-07 17:39:02 +02001461 "be more verbose (show counter open errors, etc)"),
Arnaldo Carvalho de Meloab81f3fd2011-10-05 19:16:15 -03001462 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
Namhyung Kima2ce0672014-03-04 09:06:42 +09001463 "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
 1464		    " Please refer to the man page for the complete list."),
Namhyung Kim6fe8c262014-03-04 11:01:41 +09001465 OPT_STRING(0, "fields", &field_order, "key[,keys...]",
1466 "output field(s): overhead, period, sample plus all of sort keys"),
Arnaldo Carvalho de Meloab81f3fd2011-10-05 19:16:15 -03001467 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
1468 "Show a column with the number of samples"),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001469 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
Namhyung Kima2c10d32015-10-22 15:28:49 +09001470 NULL, "enables call-graph recording and display",
Jiri Olsaae779a62013-10-26 16:25:34 +02001471 &callchain_opt),
Arnaldo Carvalho de Melo2ddd5c02016-04-18 12:09:08 -03001472 OPT_CALLBACK(0, "call-graph", &callchain_param,
Namhyung Kim76a26542015-10-22 23:28:32 +09001473 "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
Namhyung Kima2c10d32015-10-22 15:28:49 +09001474 top_callchain_help, &parse_callchain_opt),
Namhyung Kim1432ec32013-10-30 17:05:55 +09001475 OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
1476 "Accumulate callchains of children and show total overhead as well"),
Waiman Long5dbb6e82013-10-18 10:38:49 -04001477 OPT_INTEGER(0, "max-stack", &top.max_stack,
1478 "Set the maximum stack depth when parsing the callchain. "
Arnaldo Carvalho de Melo4cb93442016-04-27 10:16:24 -03001479 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
Greg Priceb21484f2012-12-06 21:48:05 -08001480 OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
1481 "ignore callees of these functions in call graphs",
1482 report_parse_ignore_callees_opt),
Arnaldo Carvalho de Meloab81f3fd2011-10-05 19:16:15 -03001483 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
1484 "Show a column with the sum of periods"),
1485 OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
1486 "only consider symbols in these dsos"),
1487 OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
1488 "only consider symbols in these comms"),
1489 OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
1490 "only consider these symbols"),
Arnaldo Carvalho de Melo1eddd9e2018-05-28 11:42:59 -03001491 OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
Arnaldo Carvalho de Melo64c6f0c2011-10-06 12:48:31 -03001492 "Interleave source code with assembly code (default)"),
Arnaldo Carvalho de Melo1eddd9e2018-05-28 11:42:59 -03001493 OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
Arnaldo Carvalho de Melo64c6f0c2011-10-06 12:48:31 -03001494 "Display raw encoding of assembly instructions (default)"),
Avi Kivity763122a2014-09-13 07:15:05 +03001495 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
1496 "Enable kernel symbol demangling"),
Song Liuee7a1122019-03-11 22:30:46 -07001497 OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
Arnaldo Carvalho de Melof178fd22018-05-28 14:24:45 -03001498 OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
Sukadev Bhattiprolu0d3942d2013-05-14 22:56:51 -07001499 "objdump binary to use for disassembly and annotations"),
Arnaldo Carvalho de Meloa47e8432018-05-28 11:50:21 -03001500 OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
Arnaldo Carvalho de Melo64c6f0c2011-10-06 12:48:31 -03001501 "Specify disassembler style (e.g. -M intel for intel syntax)"),
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001502 OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
Namhyung Kimfa5df942013-05-14 11:09:05 +09001503 OPT_CALLBACK(0, "percent-limit", &top, "percent",
1504 "Don't show entries under that percent", parse_percent_limit),
Namhyung Kim33db4562014-02-07 12:06:07 +09001505 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
1506 "How to display percentage of filtered entries", parse_filter_percentage),
Namhyung Kimcf590022014-07-31 14:47:39 +09001507 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
1508 "width[,width...]",
1509 "don't try to adjust column width, use these fixed values"),
Mark Drayton3fcb10e2018-12-04 12:34:20 -08001510 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
Kan Liang9d9cad72015-06-17 09:51:11 -04001511 "per thread proc mmap processing timeout in ms"),
Andi Kleena18b027e2015-07-18 08:24:52 -07001512 OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
1513 "branch any", "sample any taken branches",
1514 parse_branch_stack),
1515 OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
1516 "branch filter mask", "branch stack filter modes",
1517 parse_branch_stack),
Namhyung Kim053a3982015-12-23 02:07:05 +09001518 OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
1519 "Show raw trace event output (do not use print fmt or plugins)"),
Namhyung Kimc92fcfd2016-02-25 00:13:50 +09001520 OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
1521 "Show entries in a hierarchy"),
Arnaldo Carvalho de Melo4e303fb2018-10-26 15:55:23 -03001522 OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
Arnaldo Carvalho de Melo218d6112018-10-29 09:47:00 -03001523 "Use a backward ring buffer, default: no"),
Krister Johansen868a8322017-07-05 18:48:12 -07001524 OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
Kan Liang0c6b4992017-09-29 07:47:55 -07001525 OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
 1526			"number of threads to run event synthesis"),
Namhyung Kima0c0a4a2019-05-22 14:32:50 +09001527 OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
 1528		    "Record namespace events"),
Arnaldo Carvalho de Melo2f53ae32019-08-15 16:03:26 -03001529 OPTS_EVSWITCH(&top.evswitch),
Ingo Molnarb456bae2009-05-26 09:17:18 +02001530 OPT_END()
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001531 };
Jiri Olsa63503db2019-07-21 13:23:52 +02001532 struct evlist *sb_evlist = NULL;
Arnaldo Carvalho de Melobe772842012-10-01 15:20:58 -03001533 const char * const top_usage[] = {
1534 "perf top [<options>]",
1535 NULL
1536 };
Arnaldo Carvalho de Meloa635fc52014-10-09 16:16:00 -03001537 int status = hists__init();
1538
1539 if (status < 0)
1540 return status;
Ingo Molnarb456bae2009-05-26 09:17:18 +02001541
Arnaldo Carvalho de Melo982d4102018-05-25 17:28:37 -03001542 top.annotation_opts.min_pcnt = 5;
1543 top.annotation_opts.context = 4;
1544
Jiri Olsa0f98b112019-07-21 13:23:55 +02001545 top.evlist = evlist__new();
Arnaldo Carvalho de Melo8c3e10e2011-01-31 14:50:39 -02001546 if (top.evlist == NULL)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02001547 return -ENOMEM;
1548
Arnaldo Carvalho de Meloecc4c562017-01-24 13:44:10 -03001549 status = perf_config(perf_top_config, &top);
1550 if (status)
1551 return status;
Jiri Olsaeb853e82014-02-03 12:44:42 +01001552
Ingo Molnarb456bae2009-05-26 09:17:18 +02001553 argc = parse_options(argc, argv, options, top_usage, 0);
1554 if (argc)
1555 usage_with_options(top_usage, options);
1556
Jiri Olsa6484d2f2019-07-21 13:24:28 +02001557 if (!top.evlist->core.nr_entries &&
Namhyung Kim54f8f402015-12-23 02:07:01 +09001558 perf_evlist__add_default(top.evlist) < 0) {
1559 pr_err("Not enough memory for event selector list\n");
1560 goto out_delete_evlist;
1561 }
1562
Arnaldo Carvalho de Melo2f53ae32019-08-15 16:03:26 -03001563 status = evswitch__init(&top.evswitch, top.evlist, stderr);
1564 if (status)
1565 goto out_delete_evlist;
1566
Namhyung Kimc92fcfd2016-02-25 00:13:50 +09001567 if (symbol_conf.report_hierarchy) {
1568 /* disable incompatible options */
1569 symbol_conf.event_group = false;
1570 symbol_conf.cumulate_callchain = false;
1571
1572 if (field_order) {
1573 pr_err("Error: --hierarchy and --fields options cannot be used together\n");
1574 parse_options_usage(top_usage, options, "fields", 0);
1575 parse_options_usage(NULL, options, "hierarchy", 0);
1576 goto out_delete_evlist;
1577 }
1578 }
1579
Jin Yao590ac602018-10-31 19:06:35 +08001580 if (opts->branch_stack && callchain_param.enabled)
1581 symbol_conf.show_branchflag_count = true;
1582
Namhyung Kim512ae1b2014-03-18 11:31:39 +09001583 sort__mode = SORT_MODE__TOP;
Namhyung Kim3a5714f2013-05-14 11:09:01 +09001584 /* display thread wants entries to be collapsed in a different tree */
Jiri Olsa52225032016-05-03 13:54:42 +02001585 perf_hpp_list.need_collapse = 1;
Namhyung Kim3a5714f2013-05-14 11:09:01 +09001586
Arnaldo Carvalho de Melo3ee60c32016-01-18 10:24:06 +01001587 if (top.use_stdio)
1588 use_browser = 0;
1589 else if (top.use_tui)
1590 use_browser = 1;
1591
1592 setup_browser(false);
1593
Namhyung Kim40184c42015-12-23 02:07:01 +09001594 if (setup_sorting(top.evlist) < 0) {
Namhyung Kim6fe8c262014-03-04 11:01:41 +09001595 if (sort_order)
1596 parse_options_usage(top_usage, options, "s", 1);
1597 if (field_order)
1598 parse_options_usage(sort_order ? NULL : top_usage,
1599 options, "fields", 0);
1600 goto out_delete_evlist;
1601 }
Namhyung Kim22af9692014-04-16 11:04:51 +09001602
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001603 status = target__validate(target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001604 if (status) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001605 target__strerror(target, status, errbuf, BUFSIZ);
Ingo Molnarea432a8b2013-11-13 00:26:09 +01001606 ui__warning("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001607 }
Namhyung Kim4bd0f2d2012-04-26 14:15:18 +09001608
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001609 status = target__parse_uid(target);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001610 if (status) {
1611 int saved_errno = errno;
1612
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001613 target__strerror(target, status, errbuf, BUFSIZ);
Ingo Molnarea432a8b2013-11-13 00:26:09 +01001614 ui__error("%s\n", errbuf);
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001615
1616 status = -saved_errno;
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02001617 goto out_delete_evlist;
Namhyung Kim16ad2ff2012-05-07 14:09:02 +09001618 }
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02001619
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001620 if (target__none(target))
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001621 target->system_wide = true;
Arnaldo Carvalho de Melo10b47d52012-05-07 16:33:56 -03001622
Arnaldo Carvalho de Melof8a5c0b2015-12-10 14:48:45 -03001623 if (perf_evlist__create_maps(top.evlist, target) < 0) {
1624 ui__error("Couldn't create thread/CPU maps: %s\n",
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001625 errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
Arnaldo Carvalho de Melof8a5c0b2015-12-10 14:48:45 -03001626 goto out_delete_evlist;
1627 }
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001628
Arnaldo Carvalho de Melo8c3e10e2011-01-31 14:50:39 -02001629 if (top.delay_secs < 1)
1630 top.delay_secs = 1;
Frederic Weisbecker2f335a02009-06-05 19:31:01 +02001631
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001632 if (record_opts__config(opts)) {
Arnaldo Carvalho de Melo2376c672012-12-11 16:48:41 -03001633 status = -EINVAL;
Arnaldo Carvalho de Melo03ad9742014-01-03 15:56:06 -03001634 goto out_delete_evlist;
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001635 }
1636
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001637 top.sym_evsel = perf_evlist__first(top.evlist);
Arnaldo Carvalho de Melocc841582011-01-11 15:16:52 -02001638
Arnaldo Carvalho de Meloe3815262016-04-18 12:30:16 -03001639 if (!callchain_param.enabled) {
Namhyung Kim1432ec32013-10-30 17:05:55 +09001640 symbol_conf.cumulate_callchain = false;
1641 perf_hpp__cancel_cumulate();
1642 }
1643
Namhyung Kim792aeaf2015-10-22 16:45:46 +09001644 if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
1645 callchain_param.order = ORDER_CALLER;
1646
Arnaldo Carvalho de Melob01141f2016-08-25 16:09:21 -03001647 status = symbol__annotation_init();
1648 if (status < 0)
1649 goto out_delete_evlist;
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001650
Arnaldo Carvalho de Melo7f0b6fd2018-03-16 14:33:38 -03001651 annotation_config__init();
1652
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001653 symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
Changbin Du70c819e2019-03-16 16:05:48 +08001654 status = symbol__init(NULL);
1655 if (status < 0)
1656 goto out_delete_evlist;
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001657
Namhyung Kim08e71542013-04-03 21:26:19 +09001658 sort__setup_elide(stdout);
Arnaldo Carvalho de Meloab81f3fd2011-10-05 19:16:15 -03001659
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001660 get_term_dimensions(&top.winsize);
Arnaldo Carvalho de Melo8c3e10e2011-01-31 14:50:39 -02001661 if (top.print_entries == 0) {
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001662 perf_top__update_print_entries(&top);
Jiri Olsa244a1082017-11-15 14:30:57 +01001663 signal(SIGWINCH, winch_sig);
Arnaldo Carvalho de Melo3b6ed982009-11-16 19:30:27 -02001664 }
1665
Changbin Du0dba9e42019-03-16 16:05:47 +08001666 top.session = perf_session__new(NULL, false, NULL);
1667 if (top.session == NULL) {
1668 status = -1;
1669 goto out_delete_evlist;
1670 }
1671
Song Liud56354d2019-03-11 22:30:51 -07001672 if (!top.record_opts.no_bpf_event)
1673 bpf_event__add_sb_event(&sb_evlist, &perf_env);
1674
Song Liu657ee552019-03-11 22:30:50 -07001675 if (perf_evlist__start_sb_thread(sb_evlist, target)) {
1676 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1677 opts->no_bpf_event = true;
1678 }
1679
Arnaldo Carvalho de Melo1758af12011-11-28 09:37:05 -02001680 status = __cmd_top(&top);
Arnaldo Carvalho de Melo806fb632011-11-29 08:05:52 -02001681
Song Liu657ee552019-03-11 22:30:50 -07001682 if (!opts->no_bpf_event)
1683 perf_evlist__stop_sb_thread(sb_evlist);
1684
Arnaldo Carvalho de Melo0d37aa32012-01-19 14:08:15 -02001685out_delete_evlist:
Jiri Olsac12995a2019-07-21 13:23:56 +02001686 evlist__delete(top.evlist);
Changbin Du0dba9e42019-03-16 16:05:47 +08001687 perf_session__delete(top.session);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001688
1689 return status;
Ingo Molnarb456bae2009-05-26 09:17:18 +02001690}