/* sort.c */
#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "evsel.h"
#include "evlist.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char default_sort_order[] = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
int sort__has_sym = 0;
int sort__has_dso = 0;
int sort__has_socket = 0;
int sort__has_thread = 0;
int sort__has_comm = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
/*
 * Replaces all occurrences of the character given with the
 *
 *  -t, --field-separator
 *
 * option. That option selects a special separator character and disables
 * padding with spaces, so every occurrence of the separator inside symbol
 * names (and other output) is replaced with a '.' character, making the
 * separator the only character that can safely delimit fields.
 */
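/*
 * Illustrative example: with "-t :" a C++ symbol such as "std::vector"
 * is emitted as "std..vector", so ':' can only ever mean "field boundary"
 * in the output.
 */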
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}
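/*
 * Common NULL handling for the comparators below: two missing values
 * compare equal, and a single missing value is ordered consistently
 * against a present one.
 */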
static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 6;
	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :
			map->dso->long_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!sort__has_dso) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

static char *hist_entry__get_srcline(struct hist_entry *he)
{
	struct map *map = he->ms.map;

	if (!map)
		return SRCLINE_UNKNOWN;
	return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
			   he->ms.sym, true);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__get_srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__get_srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__get_srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width - 3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};
/* --sort trace */

static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	return seq.buffer;
}
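/*
 * The trace comparator and printer below key off the formatted payload
 * produced above; events that are not tracepoints compare equal and are
 * printed as "N/A".
 */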
static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);
}
static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}
static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.sym &&
			strstr(he->branch_info->to.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}
static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}
struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

/* --sort daddr_sym */

static int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}

	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}

	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
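/*
 * Data-cacheline sorting: entries are first grouped by cpumode and by the
 * identity of the mapping backing the data address (maj/min/ino/generation);
 * anonymous userspace mappings are additionally separated by pid, and ties
 * are broken on the cache-line-aligned address via cl_address().
 */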
static int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace. Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped. Sort those on address.
	 */
	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */
		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};

static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}
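/*
 * txbits[] maps PERF_TXN_* flag bits to the strings shown in the
 * Transaction column; entries marked skip_for_len are left out when
 * hist_entry__transaction_len() computes the column width.
 */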
static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION, "EL ", 0 },
	{ PERF_TXN_TRANSACTION, "TX ", 1 },
	{ PERF_TXN_SYNC, "SYNC ", 1 },
	{ PERF_TXN_ASYNC, "ASYNC ", 0 },
	{ PERF_TXN_RETRY, "RETRY ", 0 },
	{ PERF_TXN_CONFLICT, "CON ", 0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header = "Transaction ",
	.se_cmp = sort__transaction_cmp,
	.se_snprintf = hist_entry__transaction_snprintf,
	.se_width_idx = HISTC_TRANSACTION,
};

struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};
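/*
 * The DIM() tables below map the key names accepted by --sort/--fields to
 * their sort_entry implementations, indexed by the corresponding SORT_*
 * enum value.
 */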
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM

struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}
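/*
 * The __sort__hpp_* callbacks below adapt a classic struct sort_entry to the
 * perf_hpp_fmt interface, so the same sort keys can also be used as output
 * columns.
 */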
  1145. static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
  1146. struct perf_evsel *evsel)
  1147. {
  1148. struct hpp_sort_entry *hse;
  1149. size_t len = fmt->user_len;
  1150. hse = container_of(fmt, struct hpp_sort_entry, hpp);
  1151. if (!len)
  1152. len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
  1153. return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
  1154. }
  1155. static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
  1156. struct perf_hpp *hpp __maybe_unused,
  1157. struct perf_evsel *evsel)
  1158. {
  1159. struct hpp_sort_entry *hse;
  1160. size_t len = fmt->user_len;
  1161. hse = container_of(fmt, struct hpp_sort_entry, hpp);
  1162. if (!len)
  1163. len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
  1164. return len;
  1165. }
  1166. static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
  1167. struct hist_entry *he)
  1168. {
  1169. struct hpp_sort_entry *hse;
  1170. size_t len = fmt->user_len;
  1171. hse = container_of(fmt, struct hpp_sort_entry, hpp);
  1172. if (!len)
  1173. len = hists__col_len(he->hists, hse->se->se_width_idx);
  1174. return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
  1175. }
  1176. static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
  1177. struct hist_entry *a, struct hist_entry *b)
  1178. {
  1179. struct hpp_sort_entry *hse;
  1180. hse = container_of(fmt, struct hpp_sort_entry, hpp);
  1181. return hse->se->se_cmp(a, b);
  1182. }
  1183. static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
  1184. struct hist_entry *a, struct hist_entry *b)
  1185. {
  1186. struct hpp_sort_entry *hse;
  1187. int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
  1188. hse = container_of(fmt, struct hpp_sort_entry, hpp);
  1189. collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
  1190. return collapse_fn(a, b);
  1191. }
  1192. static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
  1193. struct hist_entry *a, struct hist_entry *b)
  1194. {
  1195. struct hpp_sort_entry *hse;
  1196. int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
  1197. hse = container_of(fmt, struct hpp_sort_entry, hpp);
  1198. sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
  1199. return sort_fn(a, b);
  1200. }
  1201. bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
  1202. {
  1203. return format->header == __sort__hpp_header;
  1204. }
  1205. #define MK_SORT_ENTRY_CHK(key) \
  1206. bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
  1207. { \
  1208. struct hpp_sort_entry *hse; \
  1209. \
  1210. if (!perf_hpp__is_sort_entry(fmt)) \
  1211. return false; \
  1212. \
  1213. hse = container_of(fmt, struct hpp_sort_entry, hpp); \
  1214. return hse->se == &sort_ ## key ; \
  1215. }
  1216. MK_SORT_ENTRY_CHK(trace)
  1217. MK_SORT_ENTRY_CHK(srcline)
  1218. MK_SORT_ENTRY_CHK(srcfile)
  1219. MK_SORT_ENTRY_CHK(thread)
  1220. MK_SORT_ENTRY_CHK(comm)
  1221. MK_SORT_ENTRY_CHK(dso)
  1222. MK_SORT_ENTRY_CHK(sym)
  1223. static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
  1224. {
  1225. struct hpp_sort_entry *hse_a;
  1226. struct hpp_sort_entry *hse_b;
  1227. if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
  1228. return false;
  1229. hse_a = container_of(a, struct hpp_sort_entry, hpp);
  1230. hse_b = container_of(b, struct hpp_sort_entry, hpp);
  1231. return hse_a->se == hse_b->se;
  1232. }
  1233. static void hse_free(struct perf_hpp_fmt *fmt)
  1234. {
  1235. struct hpp_sort_entry *hse;
  1236. hse = container_of(fmt, struct hpp_sort_entry, hpp);
  1237. free(hse);
  1238. }
static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

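/*
 * Run the se_filter callbacks of all sort entries attached to the hist
 * entry.  Returns -1 when no filter of the given type was applied,
 * otherwise the OR of the individual filter results.
 */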
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * A hist entry is filtered if any of the sort keys in the
		 * hpp list applies a filter.  Non-matching filter types are
		 * skipped.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

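/*
 * Dynamic entries expose individual tracepoint fields as sort keys and
 * output columns.  'dynamic_len' records the widest value seen so far so
 * the column can be sized to fit.
 */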
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

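/*
 * Compute (and cache in hpp.len) the column width of a dynamic entry:
 * the largest of the field name length, the formatted field width and
 * the maximum value width recorded so far.
 */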
static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & FIELD_IS_STRING)) {
			/* length needed to print the value as a hex number */
			fieldlen = hde->field->size * 2 + 2;
		}

		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

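/*
 * Look up this entry's field in the pretty-printed trace output (the
 * space-separated "name=value" pairs produced by get_trace_output())
 * and remember the widest value seen so far.  Skipped for raw trace
 * output, where the fixed field width is used instead.
 */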
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct perf_evsel *evsel __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct perf_evsel *evsel __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

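/*
 * Print the value of this entry's field.  For pretty-printed trace
 * output the "name=value" pair is located and only the value is shown;
 * for raw trace (or when the field is not found in the output) the raw
 * field is formatted with pevent_print_field().
 */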
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

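/*
 * Compare two hist entries by the raw bytes of this tracepoint field.
 * For dynamic (variable-size) fields the field data is a 32-bit value
 * with the offset in the low 16 bits and the size in the high 16 bits.
 * A NULL 'b' means this is only a width-update pass.
 */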
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		pevent_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

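/*
 * Duplicate a format entry, preserving its concrete type (sort entry,
 * dynamic entry or plain hpp format) so the copy carries the same
 * callbacks and private data.
 */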
struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	/* don't dereference a NULL pointer if memdup() failed */
	if (new_fmt == NULL)
		return NULL;

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}

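/*
 * Split a dynamic sort key token of the form "[event.]field[/option]"
 * in place: '.' separates the event name from the field name and '/'
 * introduces an option (currently only "raw").
 */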
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}

/* Find a matching evsel using the given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct perf_evsel *evsel;
	struct format_field *field;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}

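/*
 * Add a dynamic sort key for a tracepoint field.  Accepted forms of
 * 'tok' are:
 *   trace_fields           - all fields of all tracepoint events
 *   <field>                - the field in every event that has it
 *   <event>.<field>        - a specific field of a specific event
 *   <event>.*              - all fields of one event
 * An optional "/raw" suffix forces raw (unformatted) output.
 */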
static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			/* take the common exit path so 'str' is not leaked */
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}

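/*
 * Resolve a single sort key token, trying the common sort dimensions,
 * the generic hpp columns, the branch-stack and memory dimensions (only
 * valid in their respective sort modes) and finally dynamic tracepoint
 * fields.
 */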
static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			       struct perf_evlist *evlist,
			       int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			sort__has_sym = 1;
			/*
			 * perf diff displays the performance difference
			 * between two or more perf.data files.  Those files
			 * could come from different binaries, so we should
			 * compare symbol names rather than their addresses.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;
		} else if (sd->entry == &sort_dso) {
			sort__has_dso = 1;
		} else if (sd->entry == &sort_socket) {
			sort__has_socket = 1;
		} else if (sd->entry == &sort_thread) {
			sort__has_thread = 1;
		} else if (sd->entry == &sort_comm) {
			sort__has_comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			sort__has_sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			sort__has_sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

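/*
 * Parse the sort order string into sort keys.  Keys are separated by
 * ',' or ' '; braces keep the enclosed keys on the same hierarchy
 * level, so e.g. "{dso,sym}" puts dso and sym on one level.
 */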
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

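/*
 * Pick the default sort order for the current sort mode.  If every
 * event in the evlist is a tracepoint, switch to tracepoint mode and,
 * with raw trace enabled, sort by all trace fields.
 */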
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL)
		goto out_no_evlist;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it is referenced throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		error("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds the 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}

static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		error("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			error("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}

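/*
 * Hide columns whose filter list pins them to a single value: printing
 * the same string on every line adds no information, so the value is
 * noted once in the header and the column is elided.
 */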
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * If that happened, revert them all so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

static int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

static void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		error("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

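/*
 * Main entry point for sort/output setup: build the sort keys, add the
 * implicit "parent" key when a parent pattern is given, set up the
 * output fields and finally sync sort keys and output fields with each
 * other.
 */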
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	sort__has_sym = 0;
	sort__has_dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}