  1. #include "util.h"
  2. #include "build-id.h"
  3. #include "hist.h"
  4. #include "session.h"
  5. #include "sort.h"
  6. #include "evsel.h"
  7. #include "annotate.h"
  8. #include <math.h>
  9. static bool hists__filter_entry_by_dso(struct hists *hists,
  10. struct hist_entry *he);
  11. static bool hists__filter_entry_by_thread(struct hists *hists,
  12. struct hist_entry *he);
  13. static bool hists__filter_entry_by_symbol(struct hists *hists,
  14. struct hist_entry *he);
  15. struct callchain_param callchain_param = {
  16. .mode = CHAIN_GRAPH_REL,
  17. .min_percent = 0.5,
  18. .order = ORDER_CALLEE,
  19. .key = CCKEY_FUNCTION
  20. };
  21. u16 hists__col_len(struct hists *hists, enum hist_column col)
  22. {
  23. return hists->col_len[col];
  24. }
  25. void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
  26. {
  27. hists->col_len[col] = len;
  28. }
  29. bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
  30. {
  31. if (len > hists__col_len(hists, col)) {
  32. hists__set_col_len(hists, col, len);
  33. return true;
  34. }
  35. return false;
  36. }
  37. void hists__reset_col_len(struct hists *hists)
  38. {
  39. enum hist_column col;
  40. for (col = 0; col < HISTC_NR_COLS; ++col)
  41. hists__set_col_len(hists, col, 0);
  42. }
  43. static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
  44. {
  45. const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
  46. if (hists__col_len(hists, dso) < unresolved_col_width &&
  47. !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
  48. !symbol_conf.dso_list)
  49. hists__set_col_len(hists, dso, unresolved_col_width);
  50. }
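
/*
 * Update the running maximum width of each output column to fit the
 * fields of @h.  Called for every unfiltered entry so that the report
 * columns end up just wide enough for their widest value.
 */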
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}
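
/*
 * Exponential decay: each pass keeps 7/8 of the accumulated period and
 * event count, so entries that stop receiving samples fade out over a
 * few refresh cycles (e.g. in the live perf top display).
 */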
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			--hists->nr_entries;
			if (!n->filtered)
				--hists->nr_non_filtered_entries;

			hist_entry__free(n);
		}
	}
}
/*
 * histogram, sorted on item, collects periods
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (part of what was) allocated
			 * by sample__resolve_bstack() and will be freed
			 * after adding new entries, so we need to save a
			 * copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
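
/*
 * Walk the rb-tree of incoming entries looking for a node that compares
 * equal to @entry under the current sort keys.  On a hit the period is
 * merged into the existing entry; on a miss a new entry is allocated
 * and linked into the tree.
 */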
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and in he_stat__add_period().
	 */
	he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since
	 * the mem info was either already freed in add_hist_entry() or
	 * passed to a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once, preventing entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &callchain_cursor, sample->period);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
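
/*
 * Drive one sample through the selected hist_iter_ops:
 *
 *   prepare_entry()    - per-sample setup (resolve mem/branch info etc.)
 *   add_single_entry() - add the entry for the sample itself
 *   next_entry() /     - walk any additional entries the sample expands
 *   add_next_entry()     into (branch stack slots, callchain nodes)
 *   finish_entry()     - release per-sample state
 *
 * iter->add_entry_cb, when set, is invoked for each entry that was added.
 */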
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free(he);
}
/*
 * collapse the histogram
 */
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
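
/*
 * hists->entries_in_array holds two rb-trees; entries_in points at the
 * one currently collecting new entries.  Rotating under hists->lock lets
 * the caller drain one tree while new samples keep landing in the other.
 */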
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
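
/*
 * Rebuild hists->entries in output (display) order from the collapsed
 * tree, recomputing entry/period stats and column widths as we go.
 * min_callchain_hits derives from callchain_param.min_percent, so chains
 * below that share of the total period get cut off when sorted.
 */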
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);
	}
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
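
/*
 * Insert a zero-stat placeholder for @pair so that every entry in the
 * other hists has a counterpart in this one (used when pairing two sets
 * of hists, e.g. by perf diff).
 */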
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader.
 * If we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}