#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
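
/*
 * The decay above keeps 7/8 of the accumulated period per pass, so an
 * entry that stops getting samples fades geometrically (e.g. 1024 ->
 * 896 -> 784 -> 686 -> ...) and, thanks to integer division, eventually
 * reaches 0. hists__decay_entry() below reports that as the entry being
 * dead, which callers such as perf top use to prune stale entries
 * between screen refreshes.
 */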
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);
	else
		rb_erase(&he->rb_node_in, hists->entries_in);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		if (he->raw_data) {
			he->raw_data = memdup(he->raw_data, he->raw_size);

			if (he->raw_data == NULL) {
				map__put(he->ms.map);
				if (he->branch_info) {
					map__put(he->branch_info->from.map);
					map__put(he->branch_info->to.map);
					free(he->branch_info);
				}
				if (he->mem_info) {
					map__put(he->mem_info->iaddr.map);
					map__put(he->mem_info->daddr.map);
				}
				free(he->stat_acc);
				free(he);
				return NULL;
			}
		}
		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}
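
/*
 * Note on hist_entry__new(): 'struct hist_entry' ends in a flexible
 * 'callchain' member, so the callchain_root is tail-allocated by the
 * zalloc(sizeof(*he) + callchain_size) above, and only when callchains
 * are in use. After the bulk '*he = *template' copy, the members that
 * carry ownership are fixed up: maps are reference-counted via
 * map__get(), while branch_info and raw_data are deep-copied because
 * the caller's buffers do not outlive the sample.
 */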
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      struct perf_sample *sample,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}
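
/*
 * __hists__add_entry() only builds a stack-local template;
 * hists__findnew_entry() clones it with hist_entry__new() solely on a
 * tree miss. On a hit the per-sample resolutions are dropped instead:
 * mem_info is zfree'd there and then, while branch_info was deep-copied
 * at entry creation time and is freed later by its iterator
 * (iter_finish_branch_entry()).
 */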
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};
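
/*
 * Each ops table above is one iteration strategy, and all of them are
 * driven the same way by hist_entry_iter__add() below:
 *
 *	prepare_entry();
 *	add_single_entry();
 *	while (next_entry())
 *		add_next_entry();
 *	finish_entry();
 *
 * The nop next/add_next hooks make mem and normal samples single-entry
 * iterations; branch samples add one entry per branch stack slot, and
 * the cumulative variant walks the callchain so callers get credited
 * too (the "children" overhead).
 */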
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	free(he);
}
/*
 * collapse the histogram
 */

bool hists__collapse_insert_entry(struct hists *hists,
				  struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
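
/*
 * hists->entries_in_array[] provides two input trees that the function
 * below rotates between under hists->lock: the caller gets the tree
 * holding the entries gathered so far, while new samples keep flowing
 * into the other one. This is what lets e.g. perf top collapse and
 * resort one batch while its sampling thread keeps inserting.
 */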
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
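
/*
 * Output stage: move the collapsed entries into hists->entries, sorted
 * by the output sort keys. Note the inverted test below: entries that
 * compare higher go to the left, yielding a descending traversal with
 * rb_first(). Callchains get sorted here too, pruned against
 * min_callchain_hits, i.e. callchain_param.min_percent of the total
 * period.
 */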
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

void hists__filter_by_socket(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_socket(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
	}
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;

			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
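
/*
 * hists__match()/hists__link() above are the pairing primitives used
 * when comparing two sets of hists, e.g. by perf diff: match links
 * entries that exist on both sides, while link fabricates zeroed dummy
 * entries on the leader for buckets only the other side has, so every
 * row ends up with a counterpart to print.
 */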
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
int __hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	return 0;
}
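
/*
 * Note the socket_filter = -1 sentinel above:
 * hists__filter_entry_by_socket() only filters when the value is > -1,
 * so a freshly initialized hists filters nothing.
 */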
static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	hists__delete_all_entries(hists);
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists);
	return 0;
}
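
/*
 * hists__init() below registers the per-evsel hists lifetime hooks:
 * perf_evsel__object_config() makes every perf_evsel allocation
 * sizeof(struct hists_evsel) bytes, so a hists rides piggy-back on each
 * evsel and evsel__hists() can recover it via container_of(); the init
 * and exit hooks then run on evsel creation and deletion.
 */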
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}