hist.c

#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
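
/*
 * Grow the per-column display widths to fit this entry.  Widths only ever
 * grow (see hists__new_col_len()), so after a pass over all entries each
 * column is wide enough for its longest value.
 */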
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}
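
/*
 * Age all entries and prune the ones whose period decayed to zero.  The
 * zap_user/zap_kernel flags force removal of user-space or kernel entries
 * regardless of decay.  Entries still marked as used (e.g. currently being
 * annotated) are kept either way.
 */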
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			--hists->nr_entries;
			if (!n->filtered)
				--hists->nr_non_filtered_entries;

			hist_entry__free(n);
		}
	}
}
void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hists->entries);

		if (sort__need_collapse)
			rb_erase(&n->rb_node_in, &hists->entries_collapsed);

		--hists->nr_entries;
		if (!n->filtered)
			--hists->nr_non_filtered_entries;

		hist_entry__free(n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
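
/*
 * Insert an entry into the current hists->entries_in rbtree.  When
 * hist_entry__cmp() finds an existing node, the period/weight are merged
 * into it; otherwise a new hist_entry is allocated via hist_entry__new().
 */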
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want the sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and in he_stat__add_period().
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since
	 * the mem info was either already freed in add_hist_entry() or
	 * passed to a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only once, preventing entries from exceeding 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &callchain_cursor, sample->period);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
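
/*
 * Per sample-type callback tables for hist_entry_iter__add().  Each table
 * provides the prepare/add_single/next/add_next/finish hooks that the
 * common iteration loop below drives in that order.
 */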
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};
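
/*
 * Common driver for the iterators above: resolve the sample's callchain,
 * then invoke the ops callbacks (prepare, add_single, repeated
 * next/add_next, finish), calling the optional add_entry_cb for each
 * entry that was actually added.
 */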
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
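
/*
 * hists->entries_in points at one of the two trees in entries_in_array.
 * Rotating it under hists->lock lets new samples keep flowing into the
 * other tree while the tree returned here is collapsed.
 */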
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
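
/*
 * Rebuild hists->entries in output (display) order from the collapsed
 * (or input) tree, recomputing statistics and column widths on the way.
 */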
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);
	}
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
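
/*
 * Add a zero-period placeholder entry to 'hists' matching 'pair', so that
 * hists__link() can pair entries that exist only in the other hists.
 */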
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);

	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}