#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param callchain_param = {
	.mode		= CHAIN_GRAPH_REL,
	.min_percent	= 0.5,
	.order		= ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

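/*
 * Grow the column widths so that every field of this entry (symbol, comm,
 * dso, parent, branch and mem info) fits in the report output.
 */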
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}

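/*
 * Recompute the column widths from scratch, considering only the unfiltered
 * entries that fit in the first max_rows rows of the output tree.
 */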
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

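/*
 * Credit the period to the entry's kernel/user/guest bucket, based on the
 * cpumode of the sample.
 */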
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

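/*
 * Age every entry and remove the ones whose period decayed to zero (or that
 * match zap_user/zap_kernel), as long as they are not still marked as used.
 */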
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

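/*
 * Allocate a new hist_entry from a template, tacking the callchain root onto
 * the end of the allocation when callchains are enabled, and taking private
 * copies or references of the data the template only borrows.
 */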
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (part of an array) allocated by
			 * machine__resolve_bstack() and will be freed after
			 * adding new entries, so we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

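/*
 * Insert the entry into the current input tree (hists->entries_in) under
 * hists->lock, merging its period into an existing entry when the sort keys
 * compare equal.
 */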
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period,
					 u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from machine__resolve_mem
			 * and will not be used anymore.
			 */
			free(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period,
					  u64 weight)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.stat = {
			.period = period,
			.weight = weight,
			.nr_events = 1,
		},
		.cpu = al->cpu,
		.ip = al->addr,
		.level = al->level,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.mem_info = mi,
		.branch_info = NULL,
	};
	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period,
					     u64 weight)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = bi->to.map,
			.sym = bi->to.sym,
		},
		.cpu = al->cpu,
		.ip = bi->to.addr,
		.level = al->level,
		.stat = {
			.period = period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists = self,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.cpu = al->cpu,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.period = period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.branch_info = NULL,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free(he);
}

/*
 * collapse the histogram
 */

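/*
 * Insert the entry into the collapsed tree, or merge its stats and callchain
 * into an already collapsed entry and free it; returns true only when a new
 * node was actually inserted.
 */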
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

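/*
 * Under hists->lock, switch hists->entries_in to the other element of
 * entries_in_array and return the tree that was being filled, so it can be
 * collapsed while new samples go to the fresh tree.
 */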
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

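/*
 * Move everything from the current input tree into entries_collapsed,
 * merging entries that compare equal on the collapse keys and re-applying
 * any active filters to the survivors.
 */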
void hists__collapse_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

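/*
 * Order two entries by period; with event groups, fall back to comparing the
 * periods of the other group members, in index order.
 */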
static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

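/*
 * Insert the entry into the output tree sorted by period, resolving its
 * callchain first when callchains are in use.
 */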
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

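/*
 * Rebuild hists->entries for display: take the collapsed (or input) tree,
 * reset the totals and column widths, and insert every node sorted by period.
 */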
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find them, just add a dummy entry on the leader hists, with period=0 and
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}