hist.c

#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

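/*
 * Grow the column widths recorded in hists->col_len[] so that every
 * field of this entry (symbol, comm, dso, branch and mem info, ...)
 * will fit.  Widths only ever grow here; hists__reset_col_len() is
 * what shrinks them back before a recalculation.
 */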
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

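/*
 * Age an entry's counts by scaling them down to 7/8 on each pass.
 * Live modes such as 'perf top' invoke this via hists__decay_entries()
 * at every refresh, so entries that stop getting samples fade out and
 * are removed once their period decays to zero.
 */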
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */
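/*
 * Allocate a hist_entry initialized from 'template', grabbing
 * references on the thread and maps it points to, and deep-copying
 * branch_info, which the caller frees after the sample is added.
 */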
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

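/*
 * Find the entry matching 'entry' in the current input tree, using
 * hist_entry__cmp() to walk the rbtree; if one exists, aggregate the
 * new period into it, otherwise allocate a fresh entry and link it
 * where the search ended.
 */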
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

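/*
 * The hist_entry_iter machinery below turns one sample into one or
 * more hist entries, depending on the mode.  hist_entry_iter__add()
 * drives the ops in a fixed order: prepare_entry, add_single_entry,
 * then next_entry/add_next_entry in a loop, and finally finish_entry.
 */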
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and in he_stat__add_period().
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, bi->flags.cycles ? bi->flags.cycles : 1,
				0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * accumulated only once, preventing entries from showing more
	 * than 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

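/*
 * One ops table per sampling mode: mem for memory accesses, branch
 * for branch stacks, normal for plain samples and cumulative for
 * callchain accumulation (the 'perf report --children' behavior).
 * Callers select a table and hand it to hist_entry_iter__add().
 */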
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

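/*
 * The comparators below walk the configured sort keys via
 * perf_hpp__for_each_sort_list(): 'cmp' orders the input tree,
 * 'collapse' decides which entries get merged in the collapse stage,
 * and 'sort' (further down) gives the final output order.
 */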
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he);
}

/*
 * collapse the histogram
 */
static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

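/*
 * hists->entries_in points into a two-element array of rbtree roots.
 * Rotating it under hists->lock lets one thread keep adding entries
 * to the fresh tree while the resort below drains the other; this is
 * what keeps 'perf top' collecting while it re-sorts.
 */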
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

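/*
 * Output stage: move every entry from the (collapsed) input tree into
 * hists->entries, ordered by the output sort keys.  When callchains
 * are in use, each entry's chain is sorted too and branches below
 * min_callchain_hits get pruned by callchain_param.sort().
 */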
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

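/*
 * h->filtered is a bitmask with one HIST_FILTER__* bit per filter
 * type; an entry is displayed only when no bit is set.  Each
 * hists__filter_by_*() pass below re-evaluates one bit for every
 * entry and rebuilds the non-filtered stats and column widths.
 */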
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

void hists__filter_by_socket(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_socket(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

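/*
 * The matching/linking helpers below pair up entries from two hists,
 * e.g. when 'perf diff' compares a baseline against another session:
 * entries that exist only in 'other' get a zeroed dummy entry in the
 * leader so that both sides can be walked in lockstep.
 */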
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

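/*
 * Feed per-branch cycle counts (from branch stacks that record them,
 * e.g. Intel LBRs with cycle information) into the annotation code.
 * The branch stack is walked in program order so that 'prev' can pair
 * each branch with the preceding one for IPC computation.
 */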
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);

	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}