sort.c
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <errno.h>
  3. #include <inttypes.h>
  4. #include <regex.h>
  5. #include <linux/mman.h>
  6. #include "sort.h"
  7. #include "hist.h"
  8. #include "comm.h"
  9. #include "symbol.h"
  10. #include "thread.h"
  11. #include "evsel.h"
  12. #include "evlist.h"
  13. #include "strlist.h"
  14. #include <traceevent/event-parse.h>
  15. #include "mem-events.h"
  16. #include <linux/kernel.h>
  17. regex_t parent_regex;
  18. const char default_parent_pattern[] = "^sys_|^do_page_fault";
  19. const char *parent_pattern = default_parent_pattern;
  20. const char *default_sort_order = "comm,dso,symbol";
  21. const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
  22. const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
  23. const char default_top_sort_order[] = "dso,symbol";
  24. const char default_diff_sort_order[] = "dso,symbol";
  25. const char default_tracepoint_sort_order[] = "trace";
  26. const char *sort_order;
  27. const char *field_order;
  28. regex_t ignore_callees_regex;
  29. int have_ignore_callees = 0;
  30. enum sort_mode sort__mode = SORT_MODE__NORMAL;
  31. /*
  32. * Replaces all occurrences of the character passed via the:
  33. *
  34. * -t, --field-separator
  35. *
  36. * option. That separator is special and is not padded with spaces, so
  37. * every occurrence of it in symbol names (and other output) is replaced
  38. * with a '.' character, keeping the separator unambiguous in the output.
  39. */
  40. static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
  41. {
  42. int n;
  43. va_list ap;
  44. va_start(ap, fmt);
  45. n = vsnprintf(bf, size, fmt, ap);
  46. if (symbol_conf.field_sep && n > 0) {
  47. char *sep = bf;
  48. while (1) {
  49. sep = strchr(sep, *symbol_conf.field_sep);
  50. if (sep == NULL)
  51. break;
  52. *sep = '.';
  53. }
  54. }
  55. va_end(ap);
  56. if (n >= (int)size)
  57. return size - 1;
  58. return n;
  59. }
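/* NULL-aware compare: both keys NULL is a tie, otherwise the non-NULL key sorts as greater. */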
  60. static int64_t cmp_null(const void *l, const void *r)
  61. {
  62. if (!l && !r)
  63. return 0;
  64. else if (!l)
  65. return -1;
  66. else
  67. return 1;
  68. }
  69. /* --sort pid */
  70. static int64_t
  71. sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
  72. {
  73. return right->thread->tid - left->thread->tid;
  74. }
  75. static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
  76. size_t size, unsigned int width)
  77. {
  78. const char *comm = thread__comm_str(he->thread);
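/* The "%7d:" pid prefix takes 8 columns; reserve them out of the requested width. */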
  79. width = max(7U, width) - 8;
  80. return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
  81. width, width, comm ?: "");
  82. }
  83. static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
  84. {
  85. const struct thread *th = arg;
  86. if (type != HIST_FILTER__THREAD)
  87. return -1;
  88. return th && he->thread != th;
  89. }
  90. struct sort_entry sort_thread = {
  91. .se_header = " Pid:Command",
  92. .se_cmp = sort__thread_cmp,
  93. .se_snprintf = hist_entry__thread_snprintf,
  94. .se_filter = hist_entry__thread_filter,
  95. .se_width_idx = HISTC_THREAD,
  96. };
  97. /* --sort comm */
  98. /*
  99. * We can't use pointer comparison in functions below,
  100. * because it gives different results based on pointer
  101. * values, which could break some sorting assumptions.
  102. */
  103. static int64_t
  104. sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
  105. {
  106. return strcmp(comm__str(right->comm), comm__str(left->comm));
  107. }
  108. static int64_t
  109. sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
  110. {
  111. return strcmp(comm__str(right->comm), comm__str(left->comm));
  112. }
  113. static int64_t
  114. sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
  115. {
  116. return strcmp(comm__str(right->comm), comm__str(left->comm));
  117. }
  118. static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
  119. size_t size, unsigned int width)
  120. {
  121. return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
  122. }
  123. struct sort_entry sort_comm = {
  124. .se_header = "Command",
  125. .se_cmp = sort__comm_cmp,
  126. .se_collapse = sort__comm_collapse,
  127. .se_sort = sort__comm_sort,
  128. .se_snprintf = hist_entry__comm_snprintf,
  129. .se_filter = hist_entry__thread_filter,
  130. .se_width_idx = HISTC_COMM,
  131. };
  132. /* --sort dso */
  133. static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
  134. {
  135. struct dso *dso_l = map_l ? map_l->dso : NULL;
  136. struct dso *dso_r = map_r ? map_r->dso : NULL;
  137. const char *dso_name_l, *dso_name_r;
  138. if (!dso_l || !dso_r)
  139. return cmp_null(dso_r, dso_l);
  140. if (verbose > 0) {
  141. dso_name_l = dso_l->long_name;
  142. dso_name_r = dso_r->long_name;
  143. } else {
  144. dso_name_l = dso_l->short_name;
  145. dso_name_r = dso_r->short_name;
  146. }
  147. return strcmp(dso_name_l, dso_name_r);
  148. }
  149. static int64_t
  150. sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
  151. {
  152. return _sort__dso_cmp(right->ms.map, left->ms.map);
  153. }
  154. static int _hist_entry__dso_snprintf(struct map *map, char *bf,
  155. size_t size, unsigned int width)
  156. {
  157. if (map && map->dso) {
  158. const char *dso_name = verbose > 0 ? map->dso->long_name :
  159. map->dso->short_name;
  160. return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
  161. }
  162. return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
  163. }
  164. static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
  165. size_t size, unsigned int width)
  166. {
  167. return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
  168. }
  169. static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
  170. {
  171. const struct dso *dso = arg;
  172. if (type != HIST_FILTER__DSO)
  173. return -1;
  174. return dso && (!he->ms.map || he->ms.map->dso != dso);
  175. }
  176. struct sort_entry sort_dso = {
  177. .se_header = "Shared Object",
  178. .se_cmp = sort__dso_cmp,
  179. .se_snprintf = hist_entry__dso_snprintf,
  180. .se_filter = hist_entry__dso_filter,
  181. .se_width_idx = HISTC_DSO,
  182. };
  183. /* --sort symbol */
  184. static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
  185. {
  186. return (int64_t)(right_ip - left_ip);
  187. }
  188. static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
  189. {
  190. if (!sym_l || !sym_r)
  191. return cmp_null(sym_l, sym_r);
  192. if (sym_l == sym_r)
  193. return 0;
  194. if (sym_l->inlined || sym_r->inlined)
  195. return strcmp(sym_l->name, sym_r->name);
  196. if (sym_l->start != sym_r->start)
  197. return (int64_t)(sym_r->start - sym_l->start);
  198. return (int64_t)(sym_r->end - sym_l->end);
  199. }
  200. static int64_t
  201. sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
  202. {
  203. int64_t ret;
  204. if (!left->ms.sym && !right->ms.sym)
  205. return _sort__addr_cmp(left->ip, right->ip);
  206. /*
  207. * comparing symbol address alone is not enough since it's a
  208. * relative address within a dso.
  209. */
  210. if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
  211. ret = sort__dso_cmp(left, right);
  212. if (ret != 0)
  213. return ret;
  214. }
  215. return _sort__sym_cmp(left->ms.sym, right->ms.sym);
  216. }
  217. static int64_t
  218. sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
  219. {
  220. if (!left->ms.sym || !right->ms.sym)
  221. return cmp_null(left->ms.sym, right->ms.sym);
  222. return strcmp(right->ms.sym->name, left->ms.sym->name);
  223. }
  224. static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
  225. u64 ip, char level, char *bf, size_t size,
  226. unsigned int width)
  227. {
  228. size_t ret = 0;
  229. if (verbose > 0) {
  230. char o = map ? dso__symtab_origin(map->dso) : '!';
  231. ret += repsep_snprintf(bf, size, "%-#*llx %c ",
  232. BITS_PER_LONG / 4 + 2, ip, o);
  233. }
  234. ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
  235. if (sym && map) {
  236. if (sym->type == STT_OBJECT) {
  237. ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
  238. ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
  239. ip - map->unmap_ip(map, sym->start));
  240. } else {
  241. ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
  242. width - ret,
  243. sym->name);
  244. if (sym->inlined)
  245. ret += repsep_snprintf(bf + ret, size - ret,
  246. " (inlined)");
  247. }
  248. } else {
  249. size_t len = BITS_PER_LONG / 4;
  250. ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
  251. len, ip);
  252. }
  253. return ret;
  254. }
  255. static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
  256. size_t size, unsigned int width)
  257. {
  258. return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
  259. he->level, bf, size, width);
  260. }
  261. static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
  262. {
  263. const char *sym = arg;
  264. if (type != HIST_FILTER__SYMBOL)
  265. return -1;
  266. return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
  267. }
  268. struct sort_entry sort_sym = {
  269. .se_header = "Symbol",
  270. .se_cmp = sort__sym_cmp,
  271. .se_sort = sort__sym_sort,
  272. .se_snprintf = hist_entry__sym_snprintf,
  273. .se_filter = hist_entry__sym_filter,
  274. .se_width_idx = HISTC_SYMBOL,
  275. };
  276. /* --sort srcline */
  277. char *hist_entry__srcline(struct hist_entry *he)
  278. {
  279. return map__srcline(he->ms.map, he->ip, he->ms.sym);
  280. }
  281. static int64_t
  282. sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
  283. {
  284. if (!left->srcline)
  285. left->srcline = hist_entry__srcline(left);
  286. if (!right->srcline)
  287. right->srcline = hist_entry__srcline(right);
  288. return strcmp(right->srcline, left->srcline);
  289. }
  290. static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
  291. size_t size, unsigned int width)
  292. {
  293. if (!he->srcline)
  294. he->srcline = hist_entry__srcline(he);
  295. return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
  296. }
  297. struct sort_entry sort_srcline = {
  298. .se_header = "Source:Line",
  299. .se_cmp = sort__srcline_cmp,
  300. .se_snprintf = hist_entry__srcline_snprintf,
  301. .se_width_idx = HISTC_SRCLINE,
  302. };
  303. /* --sort srcline_from */
  304. static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
  305. {
  306. return map__srcline(ams->map, ams->al_addr, ams->sym);
  307. }
  308. static int64_t
  309. sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
  310. {
  311. if (!left->branch_info->srcline_from)
  312. left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
  313. if (!right->branch_info->srcline_from)
  314. right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
  315. return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
  316. }
  317. static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
  318. size_t size, unsigned int width)
  319. {
  320. return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
  321. }
  322. struct sort_entry sort_srcline_from = {
  323. .se_header = "From Source:Line",
  324. .se_cmp = sort__srcline_from_cmp,
  325. .se_snprintf = hist_entry__srcline_from_snprintf,
  326. .se_width_idx = HISTC_SRCLINE_FROM,
  327. };
  328. /* --sort srcline_to */
  329. static int64_t
  330. sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
  331. {
  332. if (!left->branch_info->srcline_to)
  333. left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
  334. if (!right->branch_info->srcline_to)
  335. right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
  336. return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
  337. }
  338. static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
  339. size_t size, unsigned int width)
  340. {
  341. return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
  342. }
  343. struct sort_entry sort_srcline_to = {
  344. .se_header = "To Source:Line",
  345. .se_cmp = sort__srcline_to_cmp,
  346. .se_snprintf = hist_entry__srcline_to_snprintf,
  347. .se_width_idx = HISTC_SRCLINE_TO,
  348. };
  349. /* --sort srcfile */
  350. static char no_srcfile[1];
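/* Resolve the source file for the entry's address, returning the static no_srcfile placeholder when it cannot be determined. */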
  351. static char *hist_entry__get_srcfile(struct hist_entry *e)
  352. {
  353. char *sf, *p;
  354. struct map *map = e->ms.map;
  355. if (!map)
  356. return no_srcfile;
  357. sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
  358. e->ms.sym, false, true, true, e->ip);
  359. if (!strcmp(sf, SRCLINE_UNKNOWN))
  360. return no_srcfile;
  361. p = strchr(sf, ':');
  362. if (p && *sf) {
  363. *p = 0;
  364. return sf;
  365. }
  366. free(sf);
  367. return no_srcfile;
  368. }
  369. static int64_t
  370. sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
  371. {
  372. if (!left->srcfile)
  373. left->srcfile = hist_entry__get_srcfile(left);
  374. if (!right->srcfile)
  375. right->srcfile = hist_entry__get_srcfile(right);
  376. return strcmp(right->srcfile, left->srcfile);
  377. }
  378. static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
  379. size_t size, unsigned int width)
  380. {
  381. if (!he->srcfile)
  382. he->srcfile = hist_entry__get_srcfile(he);
  383. return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
  384. }
  385. struct sort_entry sort_srcfile = {
  386. .se_header = "Source File",
  387. .se_cmp = sort__srcfile_cmp,
  388. .se_snprintf = hist_entry__srcfile_snprintf,
  389. .se_width_idx = HISTC_SRCFILE,
  390. };
  391. /* --sort parent */
  392. static int64_t
  393. sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
  394. {
  395. struct symbol *sym_l = left->parent;
  396. struct symbol *sym_r = right->parent;
  397. if (!sym_l || !sym_r)
  398. return cmp_null(sym_l, sym_r);
  399. return strcmp(sym_r->name, sym_l->name);
  400. }
  401. static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
  402. size_t size, unsigned int width)
  403. {
  404. return repsep_snprintf(bf, size, "%-*.*s", width, width,
  405. he->parent ? he->parent->name : "[other]");
  406. }
  407. struct sort_entry sort_parent = {
  408. .se_header = "Parent symbol",
  409. .se_cmp = sort__parent_cmp,
  410. .se_snprintf = hist_entry__parent_snprintf,
  411. .se_width_idx = HISTC_PARENT,
  412. };
  413. /* --sort cpu */
  414. static int64_t
  415. sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
  416. {
  417. return right->cpu - left->cpu;
  418. }
  419. static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
  420. size_t size, unsigned int width)
  421. {
  422. return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
  423. }
  424. struct sort_entry sort_cpu = {
  425. .se_header = "CPU",
  426. .se_cmp = sort__cpu_cmp,
  427. .se_snprintf = hist_entry__cpu_snprintf,
  428. .se_width_idx = HISTC_CPU,
  429. };
  430. /* --sort cgroup_id */
  431. static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
  432. {
  433. return (int64_t)(right_dev - left_dev);
  434. }
  435. static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
  436. {
  437. return (int64_t)(right_ino - left_ino);
  438. }
  439. static int64_t
  440. sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
  441. {
  442. int64_t ret;
  443. ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
  444. if (ret != 0)
  445. return ret;
  446. return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
  447. left->cgroup_id.ino);
  448. }
  449. static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
  450. char *bf, size_t size,
  451. unsigned int width __maybe_unused)
  452. {
  453. return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
  454. he->cgroup_id.ino);
  455. }
  456. struct sort_entry sort_cgroup_id = {
  457. .se_header = "cgroup id (dev/inode)",
  458. .se_cmp = sort__cgroup_id_cmp,
  459. .se_snprintf = hist_entry__cgroup_id_snprintf,
  460. .se_width_idx = HISTC_CGROUP_ID,
  461. };
  462. /* --sort socket */
  463. static int64_t
  464. sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
  465. {
  466. return right->socket - left->socket;
  467. }
  468. static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
  469. size_t size, unsigned int width)
  470. {
  471. return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
  472. }
  473. static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
  474. {
  475. int sk = *(const int *)arg;
  476. if (type != HIST_FILTER__SOCKET)
  477. return -1;
  478. return sk >= 0 && he->socket != sk;
  479. }
  480. struct sort_entry sort_socket = {
  481. .se_header = "Socket",
  482. .se_cmp = sort__socket_cmp,
  483. .se_snprintf = hist_entry__socket_snprintf,
  484. .se_filter = hist_entry__socket_filter,
  485. .se_width_idx = HISTC_SOCKET,
  486. };
  487. /* --sort trace */
  488. static char *get_trace_output(struct hist_entry *he)
  489. {
  490. struct trace_seq seq;
  491. struct perf_evsel *evsel;
  492. struct tep_record rec = {
  493. .data = he->raw_data,
  494. .size = he->raw_size,
  495. };
  496. evsel = hists_to_evsel(he->hists);
  497. trace_seq_init(&seq);
  498. if (symbol_conf.raw_trace) {
  499. tep_print_fields(&seq, he->raw_data, he->raw_size,
  500. evsel->tp_format);
  501. } else {
  502. tep_event_info(&seq, evsel->tp_format, &rec);
  503. }
  504. /*
  505. * Trim the buffer, it starts at 4KB and we're not going to
  506. * add anything more to this buffer.
  507. */
  508. return realloc(seq.buffer, seq.len + 1);
  509. }
  510. static int64_t
  511. sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
  512. {
  513. struct perf_evsel *evsel;
  514. evsel = hists_to_evsel(left->hists);
  515. if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
  516. return 0;
  517. if (left->trace_output == NULL)
  518. left->trace_output = get_trace_output(left);
  519. if (right->trace_output == NULL)
  520. right->trace_output = get_trace_output(right);
  521. return strcmp(right->trace_output, left->trace_output);
  522. }
  523. static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
  524. size_t size, unsigned int width)
  525. {
  526. struct perf_evsel *evsel;
  527. evsel = hists_to_evsel(he->hists);
  528. if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
  529. return scnprintf(bf, size, "%-.*s", width, "N/A");
  530. if (he->trace_output == NULL)
  531. he->trace_output = get_trace_output(he);
  532. return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
  533. }
  534. struct sort_entry sort_trace = {
  535. .se_header = "Trace output",
  536. .se_cmp = sort__trace_cmp,
  537. .se_snprintf = hist_entry__trace_snprintf,
  538. .se_width_idx = HISTC_TRACE,
  539. };
  540. /* sort keys for branch stacks */
  541. static int64_t
  542. sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
  543. {
  544. if (!left->branch_info || !right->branch_info)
  545. return cmp_null(left->branch_info, right->branch_info);
  546. return _sort__dso_cmp(left->branch_info->from.map,
  547. right->branch_info->from.map);
  548. }
  549. static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
  550. size_t size, unsigned int width)
  551. {
  552. if (he->branch_info)
  553. return _hist_entry__dso_snprintf(he->branch_info->from.map,
  554. bf, size, width);
  555. else
  556. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  557. }
  558. static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
  559. const void *arg)
  560. {
  561. const struct dso *dso = arg;
  562. if (type != HIST_FILTER__DSO)
  563. return -1;
  564. return dso && (!he->branch_info || !he->branch_info->from.map ||
  565. he->branch_info->from.map->dso != dso);
  566. }
  567. static int64_t
  568. sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
  569. {
  570. if (!left->branch_info || !right->branch_info)
  571. return cmp_null(left->branch_info, right->branch_info);
  572. return _sort__dso_cmp(left->branch_info->to.map,
  573. right->branch_info->to.map);
  574. }
  575. static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
  576. size_t size, unsigned int width)
  577. {
  578. if (he->branch_info)
  579. return _hist_entry__dso_snprintf(he->branch_info->to.map,
  580. bf, size, width);
  581. else
  582. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  583. }
  584. static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
  585. const void *arg)
  586. {
  587. const struct dso *dso = arg;
  588. if (type != HIST_FILTER__DSO)
  589. return -1;
  590. return dso && (!he->branch_info || !he->branch_info->to.map ||
  591. he->branch_info->to.map->dso != dso);
  592. }
  593. static int64_t
  594. sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
  595. {
  596. struct addr_map_symbol *from_l;
  597. struct addr_map_symbol *from_r;
  598. if (!left->branch_info || !right->branch_info)
  599. return cmp_null(left->branch_info, right->branch_info);
  600. from_l = &left->branch_info->from;
  601. from_r = &right->branch_info->from;
  602. if (!from_l->sym && !from_r->sym)
  603. return _sort__addr_cmp(from_l->addr, from_r->addr);
  604. return _sort__sym_cmp(from_l->sym, from_r->sym);
  605. }
  606. static int64_t
  607. sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
  608. {
  609. struct addr_map_symbol *to_l, *to_r;
  610. if (!left->branch_info || !right->branch_info)
  611. return cmp_null(left->branch_info, right->branch_info);
  612. to_l = &left->branch_info->to;
  613. to_r = &right->branch_info->to;
  614. if (!to_l->sym && !to_r->sym)
  615. return _sort__addr_cmp(to_l->addr, to_r->addr);
  616. return _sort__sym_cmp(to_l->sym, to_r->sym);
  617. }
  618. static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
  619. size_t size, unsigned int width)
  620. {
  621. if (he->branch_info) {
  622. struct addr_map_symbol *from = &he->branch_info->from;
  623. return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
  624. he->level, bf, size, width);
  625. }
  626. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  627. }
  628. static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
  629. size_t size, unsigned int width)
  630. {
  631. if (he->branch_info) {
  632. struct addr_map_symbol *to = &he->branch_info->to;
  633. return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
  634. he->level, bf, size, width);
  635. }
  636. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  637. }
  638. static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
  639. const void *arg)
  640. {
  641. const char *sym = arg;
  642. if (type != HIST_FILTER__SYMBOL)
  643. return -1;
  644. return sym && !(he->branch_info && he->branch_info->from.sym &&
  645. strstr(he->branch_info->from.sym->name, sym));
  646. }
  647. static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
  648. const void *arg)
  649. {
  650. const char *sym = arg;
  651. if (type != HIST_FILTER__SYMBOL)
  652. return -1;
  653. return sym && !(he->branch_info && he->branch_info->to.sym &&
  654. strstr(he->branch_info->to.sym->name, sym));
  655. }
  656. struct sort_entry sort_dso_from = {
  657. .se_header = "Source Shared Object",
  658. .se_cmp = sort__dso_from_cmp,
  659. .se_snprintf = hist_entry__dso_from_snprintf,
  660. .se_filter = hist_entry__dso_from_filter,
  661. .se_width_idx = HISTC_DSO_FROM,
  662. };
  663. struct sort_entry sort_dso_to = {
  664. .se_header = "Target Shared Object",
  665. .se_cmp = sort__dso_to_cmp,
  666. .se_snprintf = hist_entry__dso_to_snprintf,
  667. .se_filter = hist_entry__dso_to_filter,
  668. .se_width_idx = HISTC_DSO_TO,
  669. };
  670. struct sort_entry sort_sym_from = {
  671. .se_header = "Source Symbol",
  672. .se_cmp = sort__sym_from_cmp,
  673. .se_snprintf = hist_entry__sym_from_snprintf,
  674. .se_filter = hist_entry__sym_from_filter,
  675. .se_width_idx = HISTC_SYMBOL_FROM,
  676. };
  677. struct sort_entry sort_sym_to = {
  678. .se_header = "Target Symbol",
  679. .se_cmp = sort__sym_to_cmp,
  680. .se_snprintf = hist_entry__sym_to_snprintf,
  681. .se_filter = hist_entry__sym_to_filter,
  682. .se_width_idx = HISTC_SYMBOL_TO,
  683. };
  684. static int64_t
  685. sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
  686. {
  687. unsigned char mp, p;
  688. if (!left->branch_info || !right->branch_info)
  689. return cmp_null(left->branch_info, right->branch_info);
  690. mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
  691. p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
  692. return mp || p;
  693. }
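/* Column legend: "N" predicted, "Y" mispredicted, "N/A" when no branch info was recorded. */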
  694. static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
  695. size_t size, unsigned int width) {
  696. static const char *out = "N/A";
  697. if (he->branch_info) {
  698. if (he->branch_info->flags.predicted)
  699. out = "N";
  700. else if (he->branch_info->flags.mispred)
  701. out = "Y";
  702. }
  703. return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
  704. }
  705. static int64_t
  706. sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
  707. {
  708. if (!left->branch_info || !right->branch_info)
  709. return cmp_null(left->branch_info, right->branch_info);
  710. return left->branch_info->flags.cycles -
  711. right->branch_info->flags.cycles;
  712. }
  713. static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
  714. size_t size, unsigned int width)
  715. {
  716. if (!he->branch_info)
  717. return scnprintf(bf, size, "%-.*s", width, "N/A");
  718. if (he->branch_info->flags.cycles == 0)
  719. return repsep_snprintf(bf, size, "%-*s", width, "-");
  720. return repsep_snprintf(bf, size, "%-*hd", width,
  721. he->branch_info->flags.cycles);
  722. }
  723. struct sort_entry sort_cycles = {
  724. .se_header = "Basic Block Cycles",
  725. .se_cmp = sort__cycles_cmp,
  726. .se_snprintf = hist_entry__cycles_snprintf,
  727. .se_width_idx = HISTC_CYCLES,
  728. };
  729. /* --sort daddr_sym */
  730. int64_t
  731. sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
  732. {
  733. uint64_t l = 0, r = 0;
  734. if (left->mem_info)
  735. l = left->mem_info->daddr.addr;
  736. if (right->mem_info)
  737. r = right->mem_info->daddr.addr;
  738. return (int64_t)(r - l);
  739. }
  740. static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
  741. size_t size, unsigned int width)
  742. {
  743. uint64_t addr = 0;
  744. struct map *map = NULL;
  745. struct symbol *sym = NULL;
  746. if (he->mem_info) {
  747. addr = he->mem_info->daddr.addr;
  748. map = he->mem_info->daddr.map;
  749. sym = he->mem_info->daddr.sym;
  750. }
  751. return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
  752. width);
  753. }
  754. int64_t
  755. sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
  756. {
  757. uint64_t l = 0, r = 0;
  758. if (left->mem_info)
  759. l = left->mem_info->iaddr.addr;
  760. if (right->mem_info)
  761. r = right->mem_info->iaddr.addr;
  762. return (int64_t)(r - l);
  763. }
  764. static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
  765. size_t size, unsigned int width)
  766. {
  767. uint64_t addr = 0;
  768. struct map *map = NULL;
  769. struct symbol *sym = NULL;
  770. if (he->mem_info) {
  771. addr = he->mem_info->iaddr.addr;
  772. map = he->mem_info->iaddr.map;
  773. sym = he->mem_info->iaddr.sym;
  774. }
  775. return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
  776. width);
  777. }
  778. static int64_t
  779. sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
  780. {
  781. struct map *map_l = NULL;
  782. struct map *map_r = NULL;
  783. if (left->mem_info)
  784. map_l = left->mem_info->daddr.map;
  785. if (right->mem_info)
  786. map_r = right->mem_info->daddr.map;
  787. return _sort__dso_cmp(map_l, map_r);
  788. }
  789. static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
  790. size_t size, unsigned int width)
  791. {
  792. struct map *map = NULL;
  793. if (he->mem_info)
  794. map = he->mem_info->daddr.map;
  795. return _hist_entry__dso_snprintf(map, bf, size, width);
  796. }
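/* The mem_info sort keys below compare raw data_src fields, substituting the corresponding *_NA value when an entry has no mem_info. */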
  797. static int64_t
  798. sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
  799. {
  800. union perf_mem_data_src data_src_l;
  801. union perf_mem_data_src data_src_r;
  802. if (left->mem_info)
  803. data_src_l = left->mem_info->data_src;
  804. else
  805. data_src_l.mem_lock = PERF_MEM_LOCK_NA;
  806. if (right->mem_info)
  807. data_src_r = right->mem_info->data_src;
  808. else
  809. data_src_r.mem_lock = PERF_MEM_LOCK_NA;
  810. return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
  811. }
  812. static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
  813. size_t size, unsigned int width)
  814. {
  815. char out[10];
  816. perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
  817. return repsep_snprintf(bf, size, "%.*s", width, out);
  818. }
  819. static int64_t
  820. sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
  821. {
  822. union perf_mem_data_src data_src_l;
  823. union perf_mem_data_src data_src_r;
  824. if (left->mem_info)
  825. data_src_l = left->mem_info->data_src;
  826. else
  827. data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
  828. if (right->mem_info)
  829. data_src_r = right->mem_info->data_src;
  830. else
  831. data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
  832. return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
  833. }
  834. static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
  835. size_t size, unsigned int width)
  836. {
  837. char out[64];
  838. perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
  839. return repsep_snprintf(bf, size, "%-*s", width, out);
  840. }
  841. static int64_t
  842. sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
  843. {
  844. union perf_mem_data_src data_src_l;
  845. union perf_mem_data_src data_src_r;
  846. if (left->mem_info)
  847. data_src_l = left->mem_info->data_src;
  848. else
  849. data_src_l.mem_lvl = PERF_MEM_LVL_NA;
  850. if (right->mem_info)
  851. data_src_r = right->mem_info->data_src;
  852. else
  853. data_src_r.mem_lvl = PERF_MEM_LVL_NA;
  854. return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
  855. }
  856. static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
  857. size_t size, unsigned int width)
  858. {
  859. char out[64];
  860. perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
  861. return repsep_snprintf(bf, size, "%-*s", width, out);
  862. }
  863. static int64_t
  864. sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
  865. {
  866. union perf_mem_data_src data_src_l;
  867. union perf_mem_data_src data_src_r;
  868. if (left->mem_info)
  869. data_src_l = left->mem_info->data_src;
  870. else
  871. data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
  872. if (right->mem_info)
  873. data_src_r = right->mem_info->data_src;
  874. else
  875. data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
  876. return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
  877. }
  878. static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
  879. size_t size, unsigned int width)
  880. {
  881. char out[64];
  882. perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
  883. return repsep_snprintf(bf, size, "%-*s", width, out);
  884. }
  885. int64_t
  886. sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
  887. {
  888. u64 l, r;
  889. struct map *l_map, *r_map;
  890. if (!left->mem_info) return -1;
  891. if (!right->mem_info) return 1;
  892. /* group event types together */
  893. if (left->cpumode > right->cpumode) return -1;
  894. if (left->cpumode < right->cpumode) return 1;
  895. l_map = left->mem_info->daddr.map;
  896. r_map = right->mem_info->daddr.map;
  897. /* if both are NULL, jump to sort on al_addr instead */
  898. if (!l_map && !r_map)
  899. goto addr;
  900. if (!l_map) return -1;
  901. if (!r_map) return 1;
  902. if (l_map->maj > r_map->maj) return -1;
  903. if (l_map->maj < r_map->maj) return 1;
  904. if (l_map->min > r_map->min) return -1;
  905. if (l_map->min < r_map->min) return 1;
  906. if (l_map->ino > r_map->ino) return -1;
  907. if (l_map->ino < r_map->ino) return 1;
  908. if (l_map->ino_generation > r_map->ino_generation) return -1;
  909. if (l_map->ino_generation < r_map->ino_generation) return 1;
  910. /*
  911. * Addresses with no major/minor numbers are assumed to be
  912. * anonymous in userspace. Sort those on pid then address.
  913. *
  914. * The kernel and non-zero major/minor mapped areas are
  915. * assumed to be unity mapped. Sort those on address.
  916. */
  917. if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
  918. (!(l_map->flags & MAP_SHARED)) &&
  919. !l_map->maj && !l_map->min && !l_map->ino &&
  920. !l_map->ino_generation) {
  921. /* userspace anonymous */
  922. if (left->thread->pid_ > right->thread->pid_) return -1;
  923. if (left->thread->pid_ < right->thread->pid_) return 1;
  924. }
  925. addr:
  926. /* al_addr does all the right addr - start + offset calculations */
  927. l = cl_address(left->mem_info->daddr.al_addr);
  928. r = cl_address(right->mem_info->daddr.al_addr);
  929. if (l > r) return -1;
  930. if (l < r) return 1;
  931. return 0;
  932. }
  933. static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
  934. size_t size, unsigned int width)
  935. {
  936. uint64_t addr = 0;
  937. struct map *map = NULL;
  938. struct symbol *sym = NULL;
  939. char level = he->level;
  940. if (he->mem_info) {
  941. addr = cl_address(he->mem_info->daddr.al_addr);
  942. map = he->mem_info->daddr.map;
  943. sym = he->mem_info->daddr.sym;
  944. /* print [s] for shared data mmaps */
  945. if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
  946. map && !(map->prot & PROT_EXEC) &&
  947. (map->flags & MAP_SHARED) &&
  948. (map->maj || map->min || map->ino ||
  949. map->ino_generation))
  950. level = 's';
  951. else if (!map)
  952. level = 'X';
  953. }
  954. return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
  955. width);
  956. }
  957. struct sort_entry sort_mispredict = {
  958. .se_header = "Branch Mispredicted",
  959. .se_cmp = sort__mispredict_cmp,
  960. .se_snprintf = hist_entry__mispredict_snprintf,
  961. .se_width_idx = HISTC_MISPREDICT,
  962. };
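/* Average sample weight for the entry: total weight divided by the number of aggregated events (0 when there are none). */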
  963. static u64 he_weight(struct hist_entry *he)
  964. {
  965. return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
  966. }
  967. static int64_t
  968. sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
  969. {
  970. return he_weight(left) - he_weight(right);
  971. }
  972. static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
  973. size_t size, unsigned int width)
  974. {
  975. return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
  976. }
  977. struct sort_entry sort_local_weight = {
  978. .se_header = "Local Weight",
  979. .se_cmp = sort__local_weight_cmp,
  980. .se_snprintf = hist_entry__local_weight_snprintf,
  981. .se_width_idx = HISTC_LOCAL_WEIGHT,
  982. };
  983. static int64_t
  984. sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
  985. {
  986. return left->stat.weight - right->stat.weight;
  987. }
  988. static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
  989. size_t size, unsigned int width)
  990. {
  991. return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
  992. }
  993. struct sort_entry sort_global_weight = {
  994. .se_header = "Weight",
  995. .se_cmp = sort__global_weight_cmp,
  996. .se_snprintf = hist_entry__global_weight_snprintf,
  997. .se_width_idx = HISTC_GLOBAL_WEIGHT,
  998. };
  999. struct sort_entry sort_mem_daddr_sym = {
  1000. .se_header = "Data Symbol",
  1001. .se_cmp = sort__daddr_cmp,
  1002. .se_snprintf = hist_entry__daddr_snprintf,
  1003. .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
  1004. };
  1005. struct sort_entry sort_mem_iaddr_sym = {
  1006. .se_header = "Code Symbol",
  1007. .se_cmp = sort__iaddr_cmp,
  1008. .se_snprintf = hist_entry__iaddr_snprintf,
  1009. .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
  1010. };
  1011. struct sort_entry sort_mem_daddr_dso = {
  1012. .se_header = "Data Object",
  1013. .se_cmp = sort__dso_daddr_cmp,
  1014. .se_snprintf = hist_entry__dso_daddr_snprintf,
  1015. .se_width_idx = HISTC_MEM_DADDR_DSO,
  1016. };
  1017. struct sort_entry sort_mem_locked = {
  1018. .se_header = "Locked",
  1019. .se_cmp = sort__locked_cmp,
  1020. .se_snprintf = hist_entry__locked_snprintf,
  1021. .se_width_idx = HISTC_MEM_LOCKED,
  1022. };
  1023. struct sort_entry sort_mem_tlb = {
  1024. .se_header = "TLB access",
  1025. .se_cmp = sort__tlb_cmp,
  1026. .se_snprintf = hist_entry__tlb_snprintf,
  1027. .se_width_idx = HISTC_MEM_TLB,
  1028. };
  1029. struct sort_entry sort_mem_lvl = {
  1030. .se_header = "Memory access",
  1031. .se_cmp = sort__lvl_cmp,
  1032. .se_snprintf = hist_entry__lvl_snprintf,
  1033. .se_width_idx = HISTC_MEM_LVL,
  1034. };
  1035. struct sort_entry sort_mem_snoop = {
  1036. .se_header = "Snoop",
  1037. .se_cmp = sort__snoop_cmp,
  1038. .se_snprintf = hist_entry__snoop_snprintf,
  1039. .se_width_idx = HISTC_MEM_SNOOP,
  1040. };
  1041. struct sort_entry sort_mem_dcacheline = {
  1042. .se_header = "Data Cacheline",
  1043. .se_cmp = sort__dcacheline_cmp,
  1044. .se_snprintf = hist_entry__dcacheline_snprintf,
  1045. .se_width_idx = HISTC_MEM_DCACHELINE,
  1046. };
  1047. static int64_t
  1048. sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
  1049. {
  1050. uint64_t l = 0, r = 0;
  1051. if (left->mem_info)
  1052. l = left->mem_info->daddr.phys_addr;
  1053. if (right->mem_info)
  1054. r = right->mem_info->daddr.phys_addr;
  1055. return (int64_t)(r - l);
  1056. }
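/* Prints "[level] <hex physical address>" space-padded to the column width and truncated if it would overflow it. */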
  1057. static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
  1058. size_t size, unsigned int width)
  1059. {
  1060. uint64_t addr = 0;
  1061. size_t ret = 0;
  1062. size_t len = BITS_PER_LONG / 4;
  1063. addr = he->mem_info->daddr.phys_addr;
  1064. ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
  1065. ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
  1066. ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
  1067. if (ret > width)
  1068. bf[width] = '\0';
  1069. return width;
  1070. }
  1071. struct sort_entry sort_mem_phys_daddr = {
  1072. .se_header = "Data Physical Address",
  1073. .se_cmp = sort__phys_daddr_cmp,
  1074. .se_snprintf = hist_entry__phys_daddr_snprintf,
  1075. .se_width_idx = HISTC_MEM_PHYS_DADDR,
  1076. };
  1077. static int64_t
  1078. sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
  1079. {
  1080. if (!left->branch_info || !right->branch_info)
  1081. return cmp_null(left->branch_info, right->branch_info);
  1082. return left->branch_info->flags.abort !=
  1083. right->branch_info->flags.abort;
  1084. }
  1085. static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
  1086. size_t size, unsigned int width)
  1087. {
  1088. static const char *out = "N/A";
  1089. if (he->branch_info) {
  1090. if (he->branch_info->flags.abort)
  1091. out = "A";
  1092. else
  1093. out = ".";
  1094. }
  1095. return repsep_snprintf(bf, size, "%-*s", width, out);
  1096. }
  1097. struct sort_entry sort_abort = {
  1098. .se_header = "Transaction abort",
  1099. .se_cmp = sort__abort_cmp,
  1100. .se_snprintf = hist_entry__abort_snprintf,
  1101. .se_width_idx = HISTC_ABORT,
  1102. };
  1103. static int64_t
  1104. sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
  1105. {
  1106. if (!left->branch_info || !right->branch_info)
  1107. return cmp_null(left->branch_info, right->branch_info);
  1108. return left->branch_info->flags.in_tx !=
  1109. right->branch_info->flags.in_tx;
  1110. }
  1111. static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
  1112. size_t size, unsigned int width)
  1113. {
  1114. static const char *out = "N/A";
  1115. if (he->branch_info) {
  1116. if (he->branch_info->flags.in_tx)
  1117. out = "T";
  1118. else
  1119. out = ".";
  1120. }
  1121. return repsep_snprintf(bf, size, "%-*s", width, out);
  1122. }
  1123. struct sort_entry sort_in_tx = {
  1124. .se_header = "Branch in transaction",
  1125. .se_cmp = sort__in_tx_cmp,
  1126. .se_snprintf = hist_entry__in_tx_snprintf,
  1127. .se_width_idx = HISTC_IN_TX,
  1128. };
  1129. static int64_t
  1130. sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
  1131. {
  1132. return left->transaction - right->transaction;
  1133. }
  1134. static inline char *add_str(char *p, const char *str)
  1135. {
  1136. strcpy(p, str);
  1137. return p + strlen(str);
  1138. }
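/* Transaction flag names; entries with skip_for_len set are left out when hist_entry__transaction_len() computes the default column width. */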
  1139. static struct txbit {
  1140. unsigned flag;
  1141. const char *name;
  1142. int skip_for_len;
  1143. } txbits[] = {
  1144. { PERF_TXN_ELISION, "EL ", 0 },
  1145. { PERF_TXN_TRANSACTION, "TX ", 1 },
  1146. { PERF_TXN_SYNC, "SYNC ", 1 },
  1147. { PERF_TXN_ASYNC, "ASYNC ", 0 },
  1148. { PERF_TXN_RETRY, "RETRY ", 0 },
  1149. { PERF_TXN_CONFLICT, "CON ", 0 },
  1150. { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
  1151. { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
  1152. { 0, NULL, 0 }
  1153. };
  1154. int hist_entry__transaction_len(void)
  1155. {
  1156. int i;
  1157. int len = 0;
  1158. for (i = 0; txbits[i].name; i++) {
  1159. if (!txbits[i].skip_for_len)
  1160. len += strlen(txbits[i].name);
  1161. }
  1162. len += 4; /* :XX<space> */
  1163. return len;
  1164. }
  1165. static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
  1166. size_t size, unsigned int width)
  1167. {
  1168. u64 t = he->transaction;
  1169. char buf[128];
  1170. char *p = buf;
  1171. int i;
  1172. buf[0] = 0;
  1173. for (i = 0; txbits[i].name; i++)
  1174. if (txbits[i].flag & t)
  1175. p = add_str(p, txbits[i].name);
  1176. if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
  1177. p = add_str(p, "NEITHER ");
  1178. if (t & PERF_TXN_ABORT_MASK) {
  1179. sprintf(p, ":%" PRIx64,
  1180. (t & PERF_TXN_ABORT_MASK) >>
  1181. PERF_TXN_ABORT_SHIFT);
  1182. p += strlen(p);
  1183. }
  1184. return repsep_snprintf(bf, size, "%-*s", width, buf);
  1185. }
  1186. struct sort_entry sort_transaction = {
  1187. .se_header = "Transaction ",
  1188. .se_cmp = sort__transaction_cmp,
  1189. .se_snprintf = hist_entry__transaction_snprintf,
  1190. .se_width_idx = HISTC_TRANSACTION,
  1191. };
  1192. /* --sort symbol_size */
  1193. static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
  1194. {
  1195. int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
  1196. int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
  1197. return size_l < size_r ? -1 :
  1198. size_l == size_r ? 0 : 1;
  1199. }
  1200. static int64_t
  1201. sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
  1202. {
  1203. return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
  1204. }
  1205. static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
  1206. size_t bf_size, unsigned int width)
  1207. {
  1208. if (sym)
  1209. return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
  1210. return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
  1211. }
  1212. static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
  1213. size_t size, unsigned int width)
  1214. {
  1215. return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
  1216. }
  1217. struct sort_entry sort_sym_size = {
  1218. .se_header = "Symbol size",
  1219. .se_cmp = sort__sym_size_cmp,
  1220. .se_snprintf = hist_entry__sym_size_snprintf,
  1221. .se_width_idx = HISTC_SYM_SIZE,
  1222. };

/* --sort dso_size */

static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
	       size_l == size_r ? 0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header = "DSO size",
	.se_cmp = sort__dso_size_cmp,
	.se_snprintf = hist_entry__dso_size_snprintf,
	.se_width_idx = HISTC_DSO_SIZE,
};
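
/*
 * Tables that map the user-visible --sort/--fields key names onto the
 * sort_entry implementations above.  'taken' marks a dimension that has
 * already been registered, so the same key is not added twice.
 */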
struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
};

#undef DIM

struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM
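
/*
 * A sort key exposed as an output column: hpp_sort_entry adapts a
 * struct sort_entry to the perf_hpp_fmt callback interface used for
 * printing and comparing hist entries.
 */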
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)

static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * A hist entry is filtered if any of the sort keys in the
		 * hpp list applies, but non-matching filter types should
		 * be skipped.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}
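
/*
 * A column backed by a single tracepoint format field (added through
 * --sort <event>.<field>).  'dynamic_len' tracks the widest value seen so
 * far so the column can grow to fit, and 'raw_trace' selects the raw
 * field value instead of the pretty-printed one.
 */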
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct tep_format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* length needed to print the field as a hex number */
			fieldlen = hde->field->size * 2 + 2;
		}

		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}
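
/*
 * The pretty-printed trace output is scanned as a space-separated list of
 * name=value pairs; when this entry's field is found, remember the widest
 * value seen so far so that hde_width() can size the column accordingly.
 */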
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct tep_format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		tep_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct tep_format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct tep_format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}
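
/*
 * Split a --sort token of the form [<event>.]<field>[/<opt>] in place.
 * For example, a token such as "sched:sched_switch.next_pid/raw" yields
 * event "sched:sched_switch", field "next_pid" and option "raw"; a token
 * without a '.' is treated as a bare field name with no event part.
 */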
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}

/* find the matching evsel using a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}
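
/*
 * Allocate a dynamic entry for one tracepoint field and register it as a
 * sort key; 'raw_trace' makes the column show the raw field value instead
 * of the pretty-printed one.
 */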
static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct tep_format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct tep_format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct perf_evsel *evsel;
	struct tep_format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}

static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct tep_format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
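
/*
 * Resolve a single --sort token.  The dimension tables are tried in order:
 * common keys, the hpp overhead/period columns, branch-stack keys (branch
 * mode only), memory keys (memory mode only) and, as a last resort,
 * dynamic tracepoint fields.
 */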
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct perf_evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference
			 * amongst two or more perf.data files.  Those files
			 * could come from different binaries, so we should
			 * not compare their ips, but their symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;
		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}
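
/*
 * Parse a --sort key list.  Keys are separated by ',' or ' ' and normally
 * each key gets the next hierarchy level; keys grouped inside '{...}'
 * share a level.  For example "comm,{dso,symbol}" places dso and symbol
 * on the same level below comm.
 */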
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					pr_err("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				pr_err("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}
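
/*
 * A sort order starting with '+' extends the default order rather than
 * replacing it: for example "+period" is expanded to
 * "<default sort order>,period" before it is parsed.
 */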
static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		pr_err("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds 'pre,' prefix into 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);

	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}

static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}
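
/*
 * A column is elided when its corresponding filter list contains exactly
 * one entry: every remaining hist entry would print the same value, so the
 * value is noted in the output header instead of being repeated per line.
 */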
static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}

void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}
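
/*
 * Resolve a --fields token against the same dimension tables as --sort,
 * but register it only as an output column; it does not become a sort key.
 */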
int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}
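
/*
 * A "strict" order is one that does not start with '+'.  A '+'-prefixed
 * order is appended to the defaults (see setup_sort_order() and
 * __setup_output_field()) instead of replacing them.
 */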
bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		pr_err("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);

	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}