/* hist.c - stdio histogram and callchain output for perf report */
  1. #include <stdio.h>
  2. #include "../../util/util.h"
  3. #include "../../util/hist.h"
  4. #include "../../util/sort.h"
  5. #include "../../util/evsel.h"
  6. #include "../../util/srcline.h"
  7. #include "../../util/string2.h"
  8. #include "../../util/thread.h"
  9. #include "../../util/sane_ctype.h"
  10. static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
  11. {
  12. int i;
  13. int ret = fprintf(fp, " ");
  14. for (i = 0; i < left_margin; i++)
  15. ret += fprintf(fp, " ");
  16. return ret;
  17. }
/*
 * Print the chain of inlined functions for address @ip in @map, one
 * line per inlined frame, decorated with the same "|" depth columns as
 * the callchain graph output.
 *
 * Returns the number of bytes written; 0 when the map or DSO is
 * missing, or when no inline information can be parsed for @ip.
 */
static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
			      int depth, int depth_mask, FILE *fp)
{
	struct dso *dso;
	struct inline_node *node;
	struct inline_list *ilist;
	int ret = 0, i;

	if (map == NULL)
		return 0;

	dso = map->dso;
	if (dso == NULL)
		return 0;

	/* translate the map-relative ip to an objdump-style address */
	node = dso__parse_addr_inlines(dso,
				       map__rip_2objdump(map, ip));
	if (node == NULL)
		return 0;

	list_for_each_entry(ilist, &node->val, list) {
		/* skip frames with neither a file name nor a function name */
		if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
			ret += callchain__fprintf_left_margin(fp, left_margin);

			/* draw the "|" links for the still-open graph levels */
			for (i = 0; i < depth; i++) {
				if (depth_mask & (1 << i))
					ret += fprintf(fp, "|");
				else
					ret += fprintf(fp, " ");
				ret += fprintf(fp, " ");
			}

			/*
			 * Address/srcline sort keys prefer file:line;
			 * otherwise prefer the function name and fall
			 * back to file:line, then to "??".
			 */
			if (callchain_param.key == CCKEY_ADDRESS ||
			    callchain_param.key == CCKEY_SRCLINE) {
				if (ilist->filename != NULL)
					ret += fprintf(fp, "%s:%d (inline)",
						       ilist->filename,
						       ilist->line_nr);
				else
					ret += fprintf(fp, "??");
			} else if (ilist->funcname != NULL)
				ret += fprintf(fp, "%s (inline)",
					       ilist->funcname);
			else if (ilist->filename != NULL)
				ret += fprintf(fp, "%s:%d (inline)",
					       ilist->filename,
					       ilist->line_nr);
			else
				ret += fprintf(fp, "??");

			ret += fprintf(fp, "\n");
		}
	}

	inline_node__delete(node);
	return ret;
}
  67. static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
  68. int left_margin)
  69. {
  70. int i;
  71. size_t ret = callchain__fprintf_left_margin(fp, left_margin);
  72. for (i = 0; i < depth; i++)
  73. if (depth_mask & (1 << i))
  74. ret += fprintf(fp, "| ");
  75. else
  76. ret += fprintf(fp, " ");
  77. ret += fprintf(fp, "\n");
  78. return ret;
  79. }
  80. static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
  81. struct callchain_list *chain,
  82. int depth, int depth_mask, int period,
  83. u64 total_samples, int left_margin)
  84. {
  85. int i;
  86. size_t ret = 0;
  87. char bf[1024], *alloc_str = NULL;
  88. char buf[64];
  89. const char *str;
  90. ret += callchain__fprintf_left_margin(fp, left_margin);
  91. for (i = 0; i < depth; i++) {
  92. if (depth_mask & (1 << i))
  93. ret += fprintf(fp, "|");
  94. else
  95. ret += fprintf(fp, " ");
  96. if (!period && i == depth - 1) {
  97. ret += fprintf(fp, "--");
  98. ret += callchain_node__fprintf_value(node, fp, total_samples);
  99. ret += fprintf(fp, "--");
  100. } else
  101. ret += fprintf(fp, "%s", " ");
  102. }
  103. str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
  104. if (symbol_conf.show_branchflag_count) {
  105. if (!period)
  106. callchain_list_counts__printf_value(node, chain, NULL,
  107. buf, sizeof(buf));
  108. else
  109. callchain_list_counts__printf_value(NULL, chain, NULL,
  110. buf, sizeof(buf));
  111. if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
  112. str = "Not enough memory!";
  113. else
  114. str = alloc_str;
  115. }
  116. fputs(str, fp);
  117. fputc('\n', fp);
  118. free(alloc_str);
  119. if (symbol_conf.inline_name)
  120. ret += inline__fprintf(chain->ms.map, chain->ip,
  121. left_margin, depth, depth_mask, fp);
  122. return ret;
  123. }
/* synthetic "[...]" symbol used to report remaining (filtered) hits */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

/*
 * Allocate and initialize the "[...]" placeholder symbol.  On
 * allocation failure a warning is printed and rem_sq_bracket stays
 * NULL; callers must check it before printing the remainder entry.
 */
static void init_rem_hits(void)
{
	/*
	 * +6 makes room for "[...]" plus its terminating NUL in the
	 * name storage trailing struct symbol (see the strcpy below);
	 * assumes symbol->name is a flexible/trailing array.
	 */
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
/*
 * Recursively print one level of the callchain graph rooted at @root.
 *
 * @depth is the current (1-based) level, @depth_mask has a bit set for
 * every ancestor level whose "|" link must still be drawn, and
 * @left_margin is the fixed indentation of the whole graph.
 *
 * Returns the number of bytes written.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		/* relative mode rescales children against this child's hits */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		/* recurse into this child's children, one level deeper */
		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	/* in relative mode, report what the percent filter cut away */
	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		/* rem_sq_bracket is NULL when init_rem_hits() failed */
		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}
  212. /*
  213. * If have one single callchain root, don't bother printing
  214. * its percentage (100 % in fractal mode and the same percentage
  215. * than the hist in graph mode). This also avoid one level of column.
  216. *
  217. * However when percent-limit applied, it's possible that single callchain
  218. * node have different (non-100% in fractal mode) percentage.
  219. */
  220. static bool need_percent_display(struct rb_node *node, u64 parent_samples)
  221. {
  222. struct callchain_node *cnode;
  223. if (rb_next(node))
  224. return true;
  225. cnode = rb_entry(node, struct callchain_node, rb_node);
  226. return callchain_cumul_hits(cnode) != parent_samples;
  227. }
/*
 * Top level of the callchain graph printer.  When the chain has a
 * single root whose weight matches the parent entry, its percentage is
 * not repeated (see need_percent_display()); the root's entries are
 * printed inline after a "---" lead-in instead.
 *
 * Returns the number of bytes written.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				/* subsequent lines align under the "---" */
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						NULL, chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;

			if (symbol_conf.inline_name)
				ret += inline__fprintf(chain->ms.map,
						       chain->ip,
						       left_margin,
						       0, 0,
						       fp);
		}
		/* descend past the single root before the recursive walk */
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}
  290. static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
  291. u64 total_samples)
  292. {
  293. struct callchain_list *chain;
  294. size_t ret = 0;
  295. char bf[1024];
  296. if (!node)
  297. return 0;
  298. ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
  299. list_for_each_entry(chain, &node->val, list) {
  300. if (chain->ip >= PERF_CONTEXT_MAX)
  301. continue;
  302. ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
  303. bf, sizeof(bf), false));
  304. }
  305. return ret;
  306. }
  307. static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
  308. u64 total_samples)
  309. {
  310. size_t ret = 0;
  311. u32 entries_printed = 0;
  312. struct callchain_node *chain;
  313. struct rb_node *rb_node = rb_first(tree);
  314. while (rb_node) {
  315. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  316. ret += fprintf(fp, " ");
  317. ret += callchain_node__fprintf_value(chain, fp, total_samples);
  318. ret += fprintf(fp, "\n");
  319. ret += __callchain__fprintf_flat(fp, chain, total_samples);
  320. ret += fprintf(fp, "\n");
  321. if (++entries_printed == callchain_param.print_limit)
  322. break;
  323. rb_node = rb_next(rb_node);
  324. }
  325. return ret;
  326. }
  327. static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
  328. {
  329. const char *sep = symbol_conf.field_sep ?: ";";
  330. struct callchain_list *chain;
  331. size_t ret = 0;
  332. char bf[1024];
  333. bool first;
  334. if (!node)
  335. return 0;
  336. ret += __callchain__fprintf_folded(fp, node->parent);
  337. first = (ret == 0);
  338. list_for_each_entry(chain, &node->val, list) {
  339. if (chain->ip >= PERF_CONTEXT_MAX)
  340. continue;
  341. ret += fprintf(fp, "%s%s", first ? "" : sep,
  342. callchain_list__sym_name(chain,
  343. bf, sizeof(bf), false));
  344. first = false;
  345. }
  346. return ret;
  347. }
  348. static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
  349. u64 total_samples)
  350. {
  351. size_t ret = 0;
  352. u32 entries_printed = 0;
  353. struct callchain_node *chain;
  354. struct rb_node *rb_node = rb_first(tree);
  355. while (rb_node) {
  356. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  357. ret += callchain_node__fprintf_value(chain, fp, total_samples);
  358. ret += fprintf(fp, " ");
  359. ret += __callchain__fprintf_folded(fp, chain);
  360. ret += fprintf(fp, "\n");
  361. if (++entries_printed == callchain_param.print_limit)
  362. break;
  363. rb_node = rb_next(rb_node);
  364. }
  365. return ret;
  366. }
  367. static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
  368. u64 total_samples, int left_margin,
  369. FILE *fp)
  370. {
  371. u64 parent_samples = he->stat.period;
  372. if (symbol_conf.cumulate_callchain)
  373. parent_samples = he->stat_acc->period;
  374. switch (callchain_param.mode) {
  375. case CHAIN_GRAPH_REL:
  376. return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
  377. parent_samples, left_margin);
  378. break;
  379. case CHAIN_GRAPH_ABS:
  380. return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
  381. parent_samples, left_margin);
  382. break;
  383. case CHAIN_FLAT:
  384. return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
  385. break;
  386. case CHAIN_FOLDED:
  387. return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
  388. break;
  389. case CHAIN_NONE:
  390. break;
  391. default:
  392. pr_err("Bad callchain mode\n");
  393. }
  394. return 0;
  395. }
  396. int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
  397. struct perf_hpp_list *hpp_list)
  398. {
  399. const char *sep = symbol_conf.field_sep;
  400. struct perf_hpp_fmt *fmt;
  401. char *start = hpp->buf;
  402. int ret;
  403. bool first = true;
  404. if (symbol_conf.exclude_other && !he->parent)
  405. return 0;
  406. perf_hpp_list__for_each_format(hpp_list, fmt) {
  407. if (perf_hpp__should_skip(fmt, he->hists))
  408. continue;
  409. /*
  410. * If there's no field_sep, we still need
  411. * to display initial ' '.
  412. */
  413. if (!sep || !first) {
  414. ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
  415. advance_hpp(hpp, ret);
  416. } else
  417. first = false;
  418. if (perf_hpp__use_color() && fmt->color)
  419. ret = fmt->color(fmt, hpp, he);
  420. else
  421. ret = fmt->entry(fmt, hpp, he);
  422. ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
  423. advance_hpp(hpp, ret);
  424. }
  425. return hpp->buf - start;
  426. }
/* Format @he using its hists' default column list. */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}
  431. static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
  432. struct perf_hpp *hpp,
  433. struct hists *hists,
  434. FILE *fp)
  435. {
  436. const char *sep = symbol_conf.field_sep;
  437. struct perf_hpp_fmt *fmt;
  438. struct perf_hpp_list_node *fmt_node;
  439. char *buf = hpp->buf;
  440. size_t size = hpp->size;
  441. int ret, printed = 0;
  442. bool first = true;
  443. if (symbol_conf.exclude_other && !he->parent)
  444. return 0;
  445. ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
  446. advance_hpp(hpp, ret);
  447. /* the first hpp_list_node is for overhead columns */
  448. fmt_node = list_first_entry(&hists->hpp_formats,
  449. struct perf_hpp_list_node, list);
  450. perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
  451. /*
  452. * If there's no field_sep, we still need
  453. * to display initial ' '.
  454. */
  455. if (!sep || !first) {
  456. ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
  457. advance_hpp(hpp, ret);
  458. } else
  459. first = false;
  460. if (perf_hpp__use_color() && fmt->color)
  461. ret = fmt->color(fmt, hpp, he);
  462. else
  463. ret = fmt->entry(fmt, hpp, he);
  464. ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
  465. advance_hpp(hpp, ret);
  466. }
  467. if (!sep)
  468. ret = scnprintf(hpp->buf, hpp->size, "%*s",
  469. (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
  470. advance_hpp(hpp, ret);
  471. printed += fprintf(fp, "%s", buf);
  472. perf_hpp_list__for_each_format(he->hpp_list, fmt) {
  473. hpp->buf = buf;
  474. hpp->size = size;
  475. /*
  476. * No need to call hist_entry__snprintf_alignment() since this
  477. * fmt is always the last column in the hierarchy mode.
  478. */
  479. if (perf_hpp__use_color() && fmt->color)
  480. fmt->color(fmt, hpp, he);
  481. else
  482. fmt->entry(fmt, hpp, he);
  483. /*
  484. * dynamic entries are right-aligned but we want left-aligned
  485. * in the hierarchy mode
  486. */
  487. printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
  488. }
  489. printed += putc('\n', fp);
  490. if (symbol_conf.use_callchain && he->leaf) {
  491. u64 total = hists__total_period(hists);
  492. printed += hist_entry_callchain__fprintf(he, total, 0, fp);
  493. goto out;
  494. }
  495. out:
  496. return printed;
  497. }
/*
 * Print one formatted hist entry line (or delegate to the hierarchy
 * printer), followed by its callchain or its inlined frames.
 *
 * @bf/@bfsz is scratch space used to format the entry line; @size is
 * clamped to it.  Returns the number of bytes written.
 */
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	int callchain_ret = 0;
	int inline_ret = 0;
	struct perf_hpp hpp = {
		.buf = bf,
		.size = size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	/* clamp to the scratch buffer when @size is unset or too large */
	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	/* inlined frames are shown only when no callchain was printed */
	if (callchain_ret == 0 && symbol_conf.inline_name) {
		inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
		ret += inline_ret;
		if (inline_ret > 0)
			ret += fprintf(fp, "\n");
	} else
		ret += callchain_ret;

	return ret;
}
  529. static int print_hierarchy_indent(const char *sep, int indent,
  530. const char *line, FILE *fp)
  531. {
  532. if (sep != NULL || indent < 2)
  533. return 0;
  534. return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
  535. }
/*
 * Print the header block for hierarchy mode: the overhead column names
 * followed by the sort keys joined with " / ", then a dotted underline
 * sized to the widest sort level.
 *
 * Returns the number of header lines printed (always 2).
 */
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	/* NOTE(review): 'spaces' is a global defined elsewhere — presumably a run of blanks */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	/* underline every overhead column with dots */
	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	/* find the widest sort level so the underline covers it */
	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++; /* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");
	return 2;
}
  608. static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
  609. int line, FILE *fp)
  610. {
  611. struct perf_hpp_fmt *fmt;
  612. const char *sep = symbol_conf.field_sep;
  613. bool first = true;
  614. int span = 0;
  615. hists__for_each_format(hists, fmt) {
  616. if (perf_hpp__should_skip(fmt, hists))
  617. continue;
  618. if (!first && !span)
  619. fprintf(fp, "%s", sep ?: " ");
  620. else
  621. first = false;
  622. fmt->header(fmt, hpp, hists, line, &span);
  623. if (!span)
  624. fprintf(fp, "%s", hpp->buf);
  625. }
  626. }
/*
 * Print the standard (non-hierarchy) header block: one line per
 * configured header row plus — when no field separator is set — a
 * dotted underline and a blank "#" line.
 *
 * Returns the number of header lines printed.
 */
static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	/* with a field separator there is no underline */
	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	/* underline every visible column with dots */
	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}
  665. int hists__fprintf_headers(struct hists *hists, FILE *fp)
  666. {
  667. char bf[1024];
  668. struct perf_hpp dummy_hpp = {
  669. .buf = bf,
  670. .size = sizeof(bf),
  671. };
  672. fprintf(fp, "# ");
  673. if (symbol_conf.report_hierarchy)
  674. return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
  675. else
  676. return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
  677. }
/*
 * Print the whole hists tree: optional headers, then one line per
 * unfiltered entry (with callchains when @use_callchain), honouring
 * @max_rows, @max_cols and the @min_pcnt percent limit.
 *
 * Returns the number of bytes written.
 * NOTE(review): on malloc failure ret is set to -1, which wraps to a
 * huge value in the unsigned size_t return — callers must treat it
 * as an error sentinel, not a size.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	/* scratch line: formatted width plus color escape overhead */
	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		/* with -vv dump the thread's maps for unresolved entries */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}
  738. size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
  739. {
  740. int i;
  741. size_t ret = 0;
  742. for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
  743. const char *name;
  744. if (stats->nr_events[i] == 0)
  745. continue;
  746. name = perf_event__name(i);
  747. if (!strcmp(name, "UNKNOWN"))
  748. continue;
  749. ret += fprintf(fp, "%16s events: %10d\n", name,
  750. stats->nr_events[i]);
  751. }
  752. return ret;
  753. }