/*
 * hist.c - histogram (hist_entry) management for perf report/top:
 * entry creation, collapsing, sorting and filtering.
 */
  1. #include "util.h"
  2. #include "build-id.h"
  3. #include "hist.h"
  4. #include "session.h"
  5. #include "sort.h"
  6. #include "evsel.h"
  7. #include <math.h>
  8. static bool hists__filter_entry_by_dso(struct hists *hists,
  9. struct hist_entry *he);
  10. static bool hists__filter_entry_by_thread(struct hists *hists,
  11. struct hist_entry *he);
  12. static bool hists__filter_entry_by_symbol(struct hists *hists,
  13. struct hist_entry *he);
/* Bit positions used in hist_entry->filtered to record why an entry is hidden. */
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};
/*
 * Default callchain rendering parameters: relative graph mode, hide
 * chains below 0.5%, callee-first order, keyed by function symbol.
 */
struct callchain_param callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE,
	.key	= CCKEY_FUNCTION
};
/* Return the current display width (in characters) recorded for column @col. */
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
/* Unconditionally set the display width for column @col. */
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
  34. bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
  35. {
  36. if (len > hists__col_len(hists, col)) {
  37. hists__set_col_len(hists, col, len);
  38. return true;
  39. }
  40. return false;
  41. }
  42. void hists__reset_col_len(struct hists *hists)
  43. {
  44. enum hist_column col;
  45. for (col = 0; col < HISTC_NR_COLS; ++col)
  46. hists__set_col_len(hists, col, 0);
  47. }
/*
 * Reserve enough width in the @dso column to print an unresolved raw
 * address (BITS_PER_LONG/4 hex digits), unless the user pinned column
 * widths, set a field separator, or is filtering by DSO.
 */
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
/*
 * Grow every relevant column width from the fields of a single entry @h:
 * symbol, comm/thread, dso, parent, and (when present) branch and mem
 * info columns, plus the fixed-width mem/weight/transaction columns.
 */
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	/* the thread column needs extra room beyond the comm itself */
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		/*
		 * NOTE(review): from.map/to.map are dereferenced without a
		 * NULL check whenever the corresponding sym is resolved —
		 * this assumes a resolved symbol implies a resolved map;
		 * confirm against the branch-stack resolution code.
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	/* fixed widths for the remaining mem/weight columns */
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
  145. void hists__output_recalc_col_len(struct hists *hists, int max_rows)
  146. {
  147. struct rb_node *next = rb_first(&hists->entries);
  148. struct hist_entry *n;
  149. int row = 0;
  150. hists__reset_col_len(hists);
  151. while (next && row++ < max_rows) {
  152. n = rb_entry(next, struct hist_entry, rb_node);
  153. if (!n->filtered)
  154. hists__calc_col_len(hists, n);
  155. next = rb_next(&n->rb_node);
  156. }
  157. }
/*
 * Credit @period to the per-privilege-level counter selected by the
 * sample's @cpumode (kernel/user, host/guest); unknown modes are ignored.
 */
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
/* Account one more sample: add its period and weight, bump the event count. */
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}
  185. static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
  186. {
  187. dest->period += src->period;
  188. dest->period_sys += src->period_sys;
  189. dest->period_us += src->period_us;
  190. dest->period_guest_sys += src->period_guest_sys;
  191. dest->period_guest_us += src->period_guest_us;
  192. dest->nr_events += src->nr_events;
  193. dest->weight += src->weight;
  194. }
/* Age the stats by multiplying period and event count by 7/8. */
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
/*
 * Decay one entry's stats and subtract the lost period from the hists
 * total (unless the entry is filtered out of the totals already).
 * Returns true when the entry has fully decayed and can be removed.
 */
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}
/*
 * Age all entries (7/8 decay) and erase those that decayed to zero,
 * optionally zapping all user-level ('.') or kernel-level entries
 * outright.  Entries marked in-use (e.g. being annotated) are kept.
 */
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}
/*
 * histogram, sorted on item, collects periods
 */

/*
 * Allocate a new hist_entry as a copy of @template, with callchain root
 * storage appended when callchains are in use.  branch_info is deep-
 * copied (the template's copy is owned and freed by the resolver) and
 * all referenced maps are pinned so they are not pruned.  Returns NULL
 * on allocation failure.
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	/* the callchain root lives in the flexible space after the entry */
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * machine__resolve_bstack() and will be freed after
			 * adding new entries. So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
/*
 * Account a (non-filtered) entry in the hists totals: entry count,
 * total period, and column widths.
 */
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}
  284. static u8 symbol__parent_filter(const struct symbol *parent)
  285. {
  286. if (symbol_conf.exclude_other && parent == NULL)
  287. return 1 << HIST_FILTER__PARENT;
  288. return 0;
  289. }
/*
 * Insert @entry into the current input rb-tree, or merge its period and
 * weight into an existing equivalent entry.  In either case the sample's
 * period is also credited to the per-cpumode counters.  Returns the
 * (found or newly created) entry, or NULL on allocation failure.
 */
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from machine__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	hists->nr_entries++;
	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	return he;
}
/*
 * Build a template hist_entry from the resolved address location @al and
 * the optional branch/mem info, then add it to @hists (merging with an
 * equivalent entry if one exists).  Returns NULL on allocation failure.
 */
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		/* may pre-set the PARENT filter bit under --exclude-other */
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al);
}
/*
 * Compare two entries with each configured sort key's se_cmp in order,
 * returning the first non-zero result (0 when equal on all keys).
 */
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
/*
 * Like hist_entry__cmp() but uses each sort key's se_collapse when it
 * provides one, falling back to se_cmp.  Used to merge equivalent
 * entries in the collapsed tree.
 */
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
/* Release an entry together with the data it owns (branch/mem info, srcline). */
void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	free_srcline(he->srcline);
	free(he);
}
/*
 * collapse the histogram
 */

/*
 * Insert @he into the collapsed tree @root.  If an equivalent entry
 * (per the collapse keys) already exists there, merge @he's stats and
 * callchain into it, free @he, and return false; otherwise link @he in
 * and return true.
 */
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
/*
 * Return the current input tree and, under the hists lock, flip new
 * insertions over to the other tree of the two-element double buffer,
 * so collapsing can drain one tree while new samples land in the other.
 */
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
/* Re-evaluate every active filter (dso, thread, symbol) against @he. */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
/*
 * Drain the current input tree into the collapsed tree, merging entries
 * that are equivalent under the collapse keys.  No-op unless the sort
 * keys require a collapse pass.  Bails out early when the session is
 * being torn down; @prog, when non-NULL, is ticked once per entry.
 */
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
  488. /*
  489. * reverse the map, sort on period.
  490. */
  491. static int period_cmp(u64 period_a, u64 period_b)
  492. {
  493. if (period_a > period_b)
  494. return 1;
  495. if (period_a < period_b)
  496. return -1;
  497. return 0;
  498. }
  499. static int hist_entry__sort_on_period(struct hist_entry *a,
  500. struct hist_entry *b)
  501. {
  502. int ret;
  503. int i, nr_members;
  504. struct perf_evsel *evsel;
  505. struct hist_entry *pair;
  506. u64 *periods_a, *periods_b;
  507. ret = period_cmp(a->stat.period, b->stat.period);
  508. if (ret || !symbol_conf.event_group)
  509. return ret;
  510. evsel = hists_to_evsel(a->hists);
  511. nr_members = evsel->nr_members;
  512. if (nr_members <= 1)
  513. return ret;
  514. periods_a = zalloc(sizeof(periods_a) * nr_members);
  515. periods_b = zalloc(sizeof(periods_b) * nr_members);
  516. if (!periods_a || !periods_b)
  517. goto out;
  518. list_for_each_entry(pair, &a->pairs.head, pairs.node) {
  519. evsel = hists_to_evsel(pair->hists);
  520. periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
  521. }
  522. list_for_each_entry(pair, &b->pairs.head, pairs.node) {
  523. evsel = hists_to_evsel(pair->hists);
  524. periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
  525. }
  526. for (i = 1; i < nr_members; i++) {
  527. ret = period_cmp(periods_a[i], periods_b[i]);
  528. if (ret)
  529. break;
  530. }
  531. out:
  532. free(periods_a);
  533. free(periods_b);
  534. return ret;
  535. }
/*
 * Link @he into the output tree @entries at its period-sorted position
 * (descending), resolving/sorting its callchain first when callchains
 * are enabled; chains with fewer than @min_callchain_hits hits are cut.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
/*
 * Rebuild the output tree, sorted on period, from the collapsed tree
 * (or the input tree when no collapse pass is needed), resetting and
 * re-accumulating entry counts, total period and column widths.
 */
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	/* callchain cutoff derived from min_percent of the total period */
	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
/*
 * Clear @filter's bit on @h; if the entry thereby becomes completely
 * unfiltered, add it back into the hists totals (entry count, rows of
 * an unfolded entry, total period, sample count, column widths).
 */
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}
/*
 * Set the DSO filter bit on @he when a dso filter is active and the
 * entry's dso does not match (or the entry has no map at all).
 * Returns true if the entry was filtered out.
 */
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
/*
 * Re-apply the dso filter to every output entry, rebuilding the totals
 * from scratch.  Entries without a parent are skipped entirely under
 * --exclude-other.
 */
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
  619. static bool hists__filter_entry_by_thread(struct hists *hists,
  620. struct hist_entry *he)
  621. {
  622. if (hists->thread_filter != NULL &&
  623. he->thread != hists->thread_filter) {
  624. he->filtered |= (1 << HIST_FILTER__THREAD);
  625. return true;
  626. }
  627. return false;
  628. }
/*
 * Re-apply the thread filter to every output entry, rebuilding the
 * totals from scratch.
 */
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
/*
 * Set the SYMBOL filter bit on @he when a symbol filter string is set
 * and the entry's symbol name does not contain it as a substring (or
 * the entry has no resolved symbol).  Returns true if filtered out.
 */
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}
/*
 * Re-apply the symbol filter to every output entry, rebuilding the
 * totals from scratch.
 */
void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
/* Count one event of @type; slot 0 accumulates all events regardless of type. */
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
/* Convenience wrapper counting one event of @type in the hists' stats. */
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
/*
 * Find an entry equivalent to @pair in @hists, or insert a zeroed-stat
 * dummy copy of it to serve as a pairing list header.  Returns the
 * found/created entry, or NULL if the dummy allocation failed.
 */
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
/*
 * Binary-search the (collapsed or input) tree of @hists for an entry
 * equivalent to @he under the collapse keys; NULL when not found.
 */
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 * for every entry in @leader, find the equivalent entry in @other and
 * attach it as a pair.
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.  Returns 0 on success, -1 when
 * a dummy entry could not be allocated.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}