@@ -4,6 +4,7 @@
 #include "session.h"
 #include "sort.h"
 #include "evsel.h"
+#include "annotate.h"
 #include <math.h>
 
 static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -429,6 +430,304 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
 	return add_hist_entry(hists, &entry, al);
 }
 
+static int
+iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
+		    struct addr_location *al __maybe_unused)
+{
+	return 0;
+}
+
+static int
+iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
+			struct addr_location *al __maybe_unused)
+{
+	return 0;
+}
+
+static int
+iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+	struct perf_sample *sample = iter->sample;
+	struct mem_info *mi;
+
+	mi = sample__resolve_mem(sample, al);
+	if (mi == NULL)
+		return -ENOMEM;
+
+	iter->priv = mi;
+	return 0;
+}
+
+static int
+iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+	u64 cost;
+	struct mem_info *mi = iter->priv;
+	struct hist_entry *he;
+
+	if (mi == NULL)
+		return -EINVAL;
+
+	cost = iter->sample->weight;
+	if (!cost)
+		cost = 1;
+
+	/*
+	 * Must pass period=weight in order to get the correct
+	 * sorting from hists__collapse_resort(), which is solely
+	 * based on periods. We want sorting to be done on nr_events * weight,
+	 * and this is achieved indirectly by passing period=weight here
+	 * and in the he_stat__add_period() function.
+	 */
+	he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi,
+				cost, cost, 0);
+	if (!he)
+		return -ENOMEM;
+
+	iter->he = he;
+	return 0;
+}
+
+static int
+iter_finish_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+	struct perf_evsel *evsel = iter->evsel;
+	struct hist_entry *he = iter->he;
+	struct mem_info *mx;
+	int err = -EINVAL;
+
+	if (he == NULL)
+		goto out;
+
+	if (ui__has_annotation()) {
+		err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+		if (err)
+			goto out;
+
+		mx = he->mem_info;
+		err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
+		if (err)
+			goto out;
+	}
+
+	hists__inc_nr_samples(&evsel->hists, he->filtered);
+
+	err = hist_entry__append_callchain(he, iter->sample);
+
+out:
+	/*
+	 * We don't need to free iter->priv (mem_info) here since
+	 * the mem info was either already freed in add_hist_entry() or
+	 * passed to a new hist entry by hist_entry__new().
+	 */
+	iter->priv = NULL;
+
+	iter->he = NULL;
+	return err;
+}
+
+static int
+iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+	struct branch_info *bi;
+	struct perf_sample *sample = iter->sample;
+
+	bi = sample__resolve_bstack(sample, al);
+	if (!bi)
+		return -ENOMEM;
+
+	iter->curr = 0;
+	iter->total = sample->branch_stack->nr;
+
+	iter->priv = bi;
+	return 0;
+}
+
+static int
+iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
+			     struct addr_location *al __maybe_unused)
+{
+	return 0;
+}
+
+static int
+iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+	struct branch_info *bi = iter->priv;
+	int i = iter->curr;
+
+	if (bi == NULL)
+		return 0;
+
+	if (iter->curr >= iter->total)
+		return 0;
+
+	al->map = bi[i].to.map;
+	al->sym = bi[i].to.sym;
+	al->addr = bi[i].to.addr;
+	return 1;
+}
+
+static int
+iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+	struct branch_info *bi, *bx;
+	struct perf_evsel *evsel = iter->evsel;
+	struct hist_entry *he = NULL;
+	int i = iter->curr;
+	int err = 0;
+
+	bi = iter->priv;
+
+	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
+		goto out;
+
+	/*
+	 * The report shows the percentage of total branches captured
+	 * and not events sampled. Thus we use a pseudo period of 1.
+	 */
+	he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL,
+				1, 1, 0);
+	if (he == NULL)
+		return -ENOMEM;
+
+	if (ui__has_annotation()) {
+		bx = he->branch_info;
+		err = addr_map_symbol__inc_samples(&bx->from, evsel->idx);
+		if (err)
+			goto out;
+
+		err = addr_map_symbol__inc_samples(&bx->to, evsel->idx);
+		if (err)
+			goto out;
+	}
+
+	hists__inc_nr_samples(&evsel->hists, he->filtered);
+
+out:
+	iter->he = he;
+	iter->curr++;
+	return err;
+}
+
+static int
+iter_finish_branch_entry(struct hist_entry_iter *iter,
+			 struct addr_location *al __maybe_unused)
+{
+	zfree(&iter->priv);
+	iter->he = NULL;
+
+	return iter->curr >= iter->total ? 0 : -1;
+}
+
+static int
+iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
+			  struct addr_location *al __maybe_unused)
+{
+	return 0;
+}
+
+static int
+iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+	struct perf_evsel *evsel = iter->evsel;
+	struct perf_sample *sample = iter->sample;
+	struct hist_entry *he;
+
+	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
+				sample->period, sample->weight,
+				sample->transaction);
+	if (he == NULL)
+		return -ENOMEM;
+
+	iter->he = he;
+	return 0;
+}
+
+static int
+iter_finish_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+	int err;
+	struct hist_entry *he = iter->he;
+	struct perf_evsel *evsel = iter->evsel;
+	struct perf_sample *sample = iter->sample;
+
+	if (he == NULL)
+		return 0;
+
+	iter->he = NULL;
+
+	if (ui__has_annotation()) {
+		err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+		if (err)
+			return err;
+	}
+
+	hists__inc_nr_samples(&evsel->hists, he->filtered);
+
+	return hist_entry__append_callchain(he, sample);
+}
+
+const struct hist_iter_ops hist_iter_mem = {
+	.prepare_entry = iter_prepare_mem_entry,
+	.add_single_entry = iter_add_single_mem_entry,
+	.next_entry = iter_next_nop_entry,
+	.add_next_entry = iter_add_next_nop_entry,
+	.finish_entry = iter_finish_mem_entry,
+};
+
+const struct hist_iter_ops hist_iter_branch = {
+	.prepare_entry = iter_prepare_branch_entry,
+	.add_single_entry = iter_add_single_branch_entry,
+	.next_entry = iter_next_branch_entry,
+	.add_next_entry = iter_add_next_branch_entry,
+	.finish_entry = iter_finish_branch_entry,
+};
+
+const struct hist_iter_ops hist_iter_normal = {
+	.prepare_entry = iter_prepare_normal_entry,
+	.add_single_entry = iter_add_single_normal_entry,
+	.next_entry = iter_next_nop_entry,
+	.add_next_entry = iter_add_next_nop_entry,
+	.finish_entry = iter_finish_normal_entry,
+};
+
+int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
+			 struct perf_evsel *evsel, struct perf_sample *sample,
+			 int max_stack_depth)
+{
+	int err, err2;
+
+	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
+					max_stack_depth);
+	if (err)
+		return err;
+
+	iter->evsel = evsel;
+	iter->sample = sample;
+
+	err = iter->ops->prepare_entry(iter, al);
+	if (err)
+		goto out;
+
+	err = iter->ops->add_single_entry(iter, al);
+	if (err)
+		goto out;
+
+	while (iter->ops->next_entry(iter, al)) {
+		err = iter->ops->add_next_entry(iter, al);
+		if (err)
+			break;
+	}
+
+out:
+	err2 = iter->ops->finish_entry(iter, al);
+	if (!err)
+		err = err2;
+
+	return err;
+}
+
 int64_t
 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
 {