hist.c

// SPDX-License-Identifier: GPL-2.0
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

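/*
 * An unresolved address is printed as a raw hex number, so its column
 * needs BITS_PER_LONG / 4 characters, one per hex digit (e.g. 16 on a
 * 64-bit system).  The column is only widened when the user hasn't
 * pinned column widths or set a field separator or dso list.
 */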
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

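/*
 * Periods decay by a factor of 7/8 on each pass, which is what lets
 * live tools such as 'perf top' age out stale samples.  An entry whose
 * period has dropped to zero is reported back by hists__decay_entry()
 * so the caller can delete it.
 */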
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root *root_in;
	struct rb_root *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase(&he->rb_node_in, root_in);
	rb_erase(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self)
{
	*he = *template;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL) {
			map__zput(he->ms.map);
			free(he->stat_acc);
			return -ENOMEM;
		}

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL) {
			map__put(he->ms.map);
			if (he->branch_info) {
				map__put(he->branch_info->from.map);
				map__put(he->branch_info->to.map);
				free(he->branch_info);
			}
			if (he->mem_info) {
				map__put(he->mem_info->iaddr.map);
				map__put(he->mem_info->daddr.map);
			}
			free(he->stat_acc);
			return -ENOMEM;
		}
	}
	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT;
	he->hroot_out = RB_ROOT;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;
}

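/*
 * Default allocator: hist_entry__new() passes the callchain size as
 * 'size', so when callchains are in use the callchain_root lands in
 * the flexible array member at the end of struct hist_entry.
 */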
static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}
	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}

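/*
 * Find a matching entry in the current input tree or create a new one.
 * Matching uses hist_entry__cmp() over the configured sort keys; on a
 * hit the period/weight are accumulated into the existing entry
 * instead of adding a duplicate node.
 */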
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

static struct hist_entry *
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = al->srcline ? strdup(al->srcline) : NULL,
		.socket = al->socket,
		.cpu = al->cpu,
		.cpumode = al->cpumode,
		.ip = al->addr,
		.level = al->level,
		.stat = {
			.nr_events = 1,
			.period = sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists = hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, ops);
}

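/*
 * The hist_entry_iter callbacks below implement one sample-ingestion
 * strategy each (normal, branch stack, mem, cumulative callchain).
 * hist_entry_iter__add() drives them in a fixed order:
 *
 *	prepare_entry -> add_single_entry ->
 *	while (next_entry) add_next_entry -> finish_entry
 *
 * The nop variants are for strategies that produce a single entry per
 * sample and have nothing further to iterate.
 */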
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * We must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort(), which is based solely
	 * on periods.  We want sorting to be done on nr_events * weight,
	 * and this is indirectly achieved by passing period=weight here
	 * and via the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

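/*
 * Branch-stack samples: sample__resolve_bstack() yields one
 * branch_info per recorded branch, and the iterator below turns each
 * from/to pair into its own hist entry.
 */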
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once, preventing entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = al->srcline ? strdup(al->srcline) : NULL,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling the callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

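/*
 * Roughly how a caller such as 'perf report' is expected to drive
 * these ops per sample (a sketch, not copied verbatim from
 * builtin-report.c):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *	};
 *
 *	if (sort__mode == SORT_MODE__BRANCH)
 *		iter.ops = &hist_iter_branch;
 *	else if (symbol_conf.cumulate_callchain)
 *		iter.ops = &hist_iter_cumulative;
 *	else
 *		iter.ops = &hist_iter_normal;
 *
 *	ret = hist_entry_iter__add(&iter, &al, max_stack, arg);
 */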
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}

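/*
 * Three comparator flavors, all walking the configured sort keys:
 * fmt->cmp for matching samples in the input tree, fmt->collapse for
 * merging entries across trees, and fmt->sort (see hist_entry__sort()
 * further below) for ordering the final output.
 */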
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		mem_info__zput(he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by its own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for the current level entry, propagate
		 * the filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

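/*
 * In hierarchy mode (--hierarchy) each collapsed entry is split into a
 * chain of nodes, one per sort level; hists__hierarchy_insert_entry()
 * below walks hists->hpp_formats to build that chain top-down.
 */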
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color(&new->rb_node_in, root);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}

static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return 1;
}

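/*
 * hists->entries_in points into a two-element array of input trees:
 * rotating it under hists->lock lets one thread keep adding new
 * samples while another collapses the tree returned here (this is how
 * 'perf top' overlaps collection and display).
 */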
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * Recalculate the total period using top-level entries only,
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have the sum of both.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

static void hierarchy_insert_output_entry(struct rb_root *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	/* update column width of dynamic entries */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

  1336. static void hists__hierarchy_output_resort(struct hists *hists,
  1337. struct ui_progress *prog,
  1338. struct rb_root *root_in,
  1339. struct rb_root *root_out,
  1340. u64 min_callchain_hits,
  1341. bool use_callchain)
  1342. {
  1343. struct rb_node *node;
  1344. struct hist_entry *he;
  1345. *root_out = RB_ROOT;
  1346. node = rb_first(root_in);
  1347. while (node) {
  1348. he = rb_entry(node, struct hist_entry, rb_node_in);
  1349. node = rb_next(node);
  1350. hierarchy_insert_output_entry(root_out, he);
  1351. if (prog)
  1352. ui_progress__update(prog, 1);
  1353. hists->nr_entries++;
  1354. if (!he->filtered) {
  1355. hists->nr_non_filtered_entries++;
  1356. hists__calc_col_len(hists, he);
  1357. }
  1358. if (!he->leaf) {
  1359. hists__hierarchy_output_resort(hists, prog,
  1360. &he->hroot_in,
  1361. &he->hroot_out,
  1362. min_callchain_hits,
  1363. use_callchain);
  1364. continue;
  1365. }
  1366. if (!use_callchain)
  1367. continue;
  1368. if (callchain_param.mode == CHAIN_GRAPH_REL) {
  1369. u64 total = he->stat.period;
  1370. if (symbol_conf.cumulate_callchain)
  1371. total = he->stat_acc->period;
  1372. min_callchain_hits = total * (callchain_param.min_percent / 100);
  1373. }
  1374. callchain_param.sort(&he->sorted_chain, he->callchain,
  1375. min_callchain_hits, &callchain_param);
  1376. }
  1377. }
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}

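/*
 * Rebuild hists->entries from either the collapsed tree or the input
 * tree, depending on whether this hists needs collapsing.  The optional
 * @cb callback lets callers drop entries: a non-zero return skips the
 * entry, keeping it out of the output tree and the stats.
 */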
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

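/*
 * Public resort entry points.  A typical consumer (perf report, for
 * instance) runs, in rough sketch form:
 *
 *	hists__collapse_resort(hists, &prog);
 *	perf_evsel__output_resort(evsel, &prog);
 *
 * i.e. collapse first, then resort for output.  The two lines above
 * illustrate the call order only; they are not a verbatim quote of any
 * caller.
 */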
void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb);
}

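/*
 * Hierarchy traversal helpers.  Together, can_goto_child(),
 * __rb_hierarchy_next() and rb_hierarchy_prev() implement a pre-order
 * walk over the on-screen tree: descend into hroot_out when an entry
 * is unfolded (or when forced via HMD_FORCE_CHILD), otherwise move to
 * the next sibling, climbing back up through parent_he once a subtree
 * is exhausted.
 */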
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

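/*
 * Mark @h as passing @filter and redo the bookkeeping that filtering
 * invalidated.  In hierarchy mode the entry's period is also added back
 * into every parent, and newly unfiltered parents are folded so the
 * browser state stays consistent.
 */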
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

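/*
 * Flat-mode filtering: walk the output tree once, let @filter mark the
 * entries that should be hidden, and redo the non-filtered stats and
 * column widths for everything that survives.
 */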
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root new_root = RB_ROOT;
	struct rb_node *nd;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

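/*
 * Hierarchy-mode filtering.  hist_entry__filter() returns one of three
 * verdicts per entry, handled in the cases below:
 *
 *	< 0	the entry's sort key has a different type than the
 *		filter; zero its period and descend into the children,
 *	  1	the filter matched (filter out); skip the whole subtree,
 *	  0	the entry passes; restore its stats and move on.
 *
 * Because child periods feed parent periods, the output tree must be
 * resorted afterwards (via resort_filtered_entry() above).
 */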
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root new_root = RB_ROOT;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set the filter marker and move to a child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set the filter marker and move to the next sibling
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add the period to hists and parents, erase the filter marker
		 * and move to the next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * Resort the output after applying a new filter, since a filter
	 * at a lower level of the hierarchy can change periods at an
	 * upper level.
	 */
	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

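/*
 * Pairing support (used by perf diff, among others): when an entry
 * exists in the "other" hists but not in the leader, a zero-period
 * dummy entry is inserted into the leader so both sides line up
 * one-to-one.
 */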
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;

	p = &root->rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

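/*
 * Same lookup as hists__find_entry(), but for one level of a hierarchy:
 * compare with the entry's own sort-key list (hpp_list) rather than the
 * global one, since each hierarchy level carries its own columns.
 */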
static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root *leader_root,
				   struct rb_root *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root *leader_root,
				 struct rb_root *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/*
			 * The dummy's parent must point into the leader
			 * tree, not into pos's tree.
			 */
			leader->parent_he = parent;
			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

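/*
 * Feed per-branch cycle counts (on hardware that reports them, e.g.
 * recent Intel LBRs) into the annotation code.  Branches are stored
 * most-recent-first, so the loop below walks the stack backwards to
 * recover program order; @prev then points at the target of the
 * previous branch, which is what lets
 * addr_map_symbol__account_cycles() derive an IPC for the block.
 */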
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles, always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, since we still want to process
			 * the other entries.
			 *
			 * For non-standard branch modes always force
			 * no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

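/*
 * Compose the status line shown at the top of the report/top browsers,
 * e.g. (illustrative, not a captured output):
 *
 *	Samples: 40K of event 'cycles', Event count (approx.): 34821560917
 *
 * plus optional UID/Thread/DSO/Socket suffixes for any active filters.
 */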
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	const struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = perf_evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct perf_evsel *pos;

		perf_evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
				     ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread->comm_set ? thread__comm_str(thread) : ""),
					     thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}

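/*
 * Option/config glue for the percentage base: "relative" computes
 * percentages against the filtered total, "absolute" against the full
 * total.  This typically arrives either from a command-line option
 * (perf report's --percentage, for instance) or from the
 * hist.percentage config entry handled by perf_hist_config() below.
 */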
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

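/*
 * Initialize a struct hists.  There are two input rbtrees, rotated via
 * entries_in, so that new entries can keep arriving in one tree while a
 * resort drains the other (the pattern perf top relies on); the
 * collapsed and output trees are rebuilt from those.
 */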
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del(&fmt->list);
			free(fmt);
		}
		list_del(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * Register hists_evsel as the evsel class, so that every new evsel
 * carries a struct hists; hists_evsel__exit() above takes care of
 * freeing the hist_entries stored in the rbtrees on teardown.
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);

	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}