/* hist.c — stdio histogram and callchain output for perf report */
  1. #include <stdio.h>
  2. #include "../../util/util.h"
  3. #include "../../util/hist.h"
  4. #include "../../util/sort.h"
  5. #include "../../util/evsel.h"
  6. static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
  7. {
  8. int i;
  9. int ret = fprintf(fp, " ");
  10. for (i = 0; i < left_margin; i++)
  11. ret += fprintf(fp, " ");
  12. return ret;
  13. }
  14. static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
  15. int left_margin)
  16. {
  17. int i;
  18. size_t ret = callchain__fprintf_left_margin(fp, left_margin);
  19. for (i = 0; i < depth; i++)
  20. if (depth_mask & (1 << i))
  21. ret += fprintf(fp, "| ");
  22. else
  23. ret += fprintf(fp, " ");
  24. ret += fprintf(fp, "\n");
  25. return ret;
  26. }
/*
 * Print one callchain_list entry (one symbol) of a graph-mode callchain.
 * @depth_mask selects which ancestor levels still draw a '|' connector;
 * when @period is zero (the first entry of a new branch) the branch's
 * value is printed as "--value--" in front of the symbol name.
 * Returns the number of characters written for the graph decoration;
 * the symbol text itself (fputs/fputc below) is not added to @ret.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		/* Connector for each level that still has pending siblings. */
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			/* Branch head: show this node's value inline. */
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		/*
		 * Append branch flag counts to the name; @node is passed only
		 * for the branch head (!period) — presumably so cumulated
		 * counts are used there; verify in
		 * callchain_list_counts__printf_value().
		 */
		if (!period)
			callchain_list_counts__printf_value(node, chain, NULL,
							    buf, sizeof(buf));
		else
			callchain_list_counts__printf_value(NULL, chain, NULL,
							    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str); /* free(NULL) is a no-op when nothing was built */

	return ret;
}
/* Synthetic "[...]" symbol used to display remaining (filtered) hits. */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

/*
 * Allocate and set up the shared "[...]" placeholder.  On allocation
 * failure a warning is printed and rem_sq_bracket stays NULL; callers
 * check it before printing remaining hits.
 */
static void init_rem_hits(void)
{
	/*
	 * +6 holds "[...]" plus the NUL terminator — assumes symbol.name is
	 * a trailing array at the end of the struct; TODO confirm.
	 */
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
/*
 * Recursively print one level of a graph-mode callchain tree.
 * @depth is the current level (starting at 1); @depth_mask tracks which
 * ancestor levels still need a '|' connector drawn.  In CHAIN_GRAPH_REL
 * mode, hits filtered out below the percent limit accumulate in
 * @remaining and are printed as a trailing "[...]" pseudo entry.
 * Returns the number of characters written.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			/* i == 0 marks the branch head (period == 0 below). */
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		/* Relative mode scales children against this child's hits. */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		/* Pseudo node carrying the hits filtered out above. */
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}
/*
 * If there is a single callchain root, don't bother printing
 * its percentage (100% in fractal mode, and the same percentage
 * as the hist entry in graph mode). This also avoids one level of
 * column.
 *
 * However, when a percent limit is applied, it's possible that the
 * single callchain node has a different (non-100% in fractal mode)
 * percentage.
 */
  164. static bool need_percent_display(struct rb_node *node, u64 parent_samples)
  165. {
  166. struct callchain_node *cnode;
  167. if (rb_next(node))
  168. return true;
  169. cnode = rb_entry(node, struct callchain_node, rb_node);
  170. return callchain_cumul_hits(cnode) != parent_samples;
  171. }
/*
 * Print a whole graph-mode callchain for one hist entry.  When the tree
 * has a single root that covers all of @parent_samples, that root's
 * entries are printed inline (without a percentage) and the recursion
 * starts at its children.  Returns the number of characters written.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;

			if (!printed) {
				/* Draw the "|\n---" lead-in exactly once. */
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						NULL, chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		/* Descend into the single root's children. */
		root = &cnode->rb_root;
	}

	/* Relative mode percentages are taken against the parent's samples. */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}
  228. static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
  229. u64 total_samples)
  230. {
  231. struct callchain_list *chain;
  232. size_t ret = 0;
  233. char bf[1024];
  234. if (!node)
  235. return 0;
  236. ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
  237. list_for_each_entry(chain, &node->val, list) {
  238. if (chain->ip >= PERF_CONTEXT_MAX)
  239. continue;
  240. ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
  241. bf, sizeof(bf), false));
  242. }
  243. return ret;
  244. }
  245. static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
  246. u64 total_samples)
  247. {
  248. size_t ret = 0;
  249. u32 entries_printed = 0;
  250. struct callchain_node *chain;
  251. struct rb_node *rb_node = rb_first(tree);
  252. while (rb_node) {
  253. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  254. ret += fprintf(fp, " ");
  255. ret += callchain_node__fprintf_value(chain, fp, total_samples);
  256. ret += fprintf(fp, "\n");
  257. ret += __callchain__fprintf_flat(fp, chain, total_samples);
  258. ret += fprintf(fp, "\n");
  259. if (++entries_printed == callchain_param.print_limit)
  260. break;
  261. rb_node = rb_next(rb_node);
  262. }
  263. return ret;
  264. }
  265. static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
  266. {
  267. const char *sep = symbol_conf.field_sep ?: ";";
  268. struct callchain_list *chain;
  269. size_t ret = 0;
  270. char bf[1024];
  271. bool first;
  272. if (!node)
  273. return 0;
  274. ret += __callchain__fprintf_folded(fp, node->parent);
  275. first = (ret == 0);
  276. list_for_each_entry(chain, &node->val, list) {
  277. if (chain->ip >= PERF_CONTEXT_MAX)
  278. continue;
  279. ret += fprintf(fp, "%s%s", first ? "" : sep,
  280. callchain_list__sym_name(chain,
  281. bf, sizeof(bf), false));
  282. first = false;
  283. }
  284. return ret;
  285. }
  286. static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
  287. u64 total_samples)
  288. {
  289. size_t ret = 0;
  290. u32 entries_printed = 0;
  291. struct callchain_node *chain;
  292. struct rb_node *rb_node = rb_first(tree);
  293. while (rb_node) {
  294. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  295. ret += callchain_node__fprintf_value(chain, fp, total_samples);
  296. ret += fprintf(fp, " ");
  297. ret += __callchain__fprintf_folded(fp, chain);
  298. ret += fprintf(fp, "\n");
  299. if (++entries_printed == callchain_param.print_limit)
  300. break;
  301. rb_node = rb_next(rb_node);
  302. }
  303. return ret;
  304. }
  305. static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
  306. u64 total_samples, int left_margin,
  307. FILE *fp)
  308. {
  309. u64 parent_samples = he->stat.period;
  310. if (symbol_conf.cumulate_callchain)
  311. parent_samples = he->stat_acc->period;
  312. switch (callchain_param.mode) {
  313. case CHAIN_GRAPH_REL:
  314. return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
  315. parent_samples, left_margin);
  316. break;
  317. case CHAIN_GRAPH_ABS:
  318. return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
  319. parent_samples, left_margin);
  320. break;
  321. case CHAIN_FLAT:
  322. return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
  323. break;
  324. case CHAIN_FOLDED:
  325. return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
  326. break;
  327. case CHAIN_NONE:
  328. break;
  329. default:
  330. pr_err("Bad callchain mode\n");
  331. }
  332. return 0;
  333. }
/*
 * Format one hist entry into @hpp->buf, one column per format in
 * @hpp_list, columns separated by symbol_conf.field_sep (or a single
 * space).  Returns the number of characters written into the buffer,
 * or 0 when the entry is excluded (exclude_other with no parent).
 */
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		/* Pad/align the column before moving the write cursor. */
		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}
/* Format @he using its hists' default column list. */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}
  369. static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
  370. struct perf_hpp *hpp,
  371. struct hists *hists,
  372. FILE *fp)
  373. {
  374. const char *sep = symbol_conf.field_sep;
  375. struct perf_hpp_fmt *fmt;
  376. struct perf_hpp_list_node *fmt_node;
  377. char *buf = hpp->buf;
  378. size_t size = hpp->size;
  379. int ret, printed = 0;
  380. bool first = true;
  381. if (symbol_conf.exclude_other && !he->parent)
  382. return 0;
  383. ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
  384. advance_hpp(hpp, ret);
  385. /* the first hpp_list_node is for overhead columns */
  386. fmt_node = list_first_entry(&hists->hpp_formats,
  387. struct perf_hpp_list_node, list);
  388. perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
  389. /*
  390. * If there's no field_sep, we still need
  391. * to display initial ' '.
  392. */
  393. if (!sep || !first) {
  394. ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
  395. advance_hpp(hpp, ret);
  396. } else
  397. first = false;
  398. if (perf_hpp__use_color() && fmt->color)
  399. ret = fmt->color(fmt, hpp, he);
  400. else
  401. ret = fmt->entry(fmt, hpp, he);
  402. ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
  403. advance_hpp(hpp, ret);
  404. }
  405. if (!sep)
  406. ret = scnprintf(hpp->buf, hpp->size, "%*s",
  407. (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
  408. advance_hpp(hpp, ret);
  409. printed += fprintf(fp, "%s", buf);
  410. perf_hpp_list__for_each_format(he->hpp_list, fmt) {
  411. hpp->buf = buf;
  412. hpp->size = size;
  413. /*
  414. * No need to call hist_entry__snprintf_alignment() since this
  415. * fmt is always the last column in the hierarchy mode.
  416. */
  417. if (perf_hpp__use_color() && fmt->color)
  418. fmt->color(fmt, hpp, he);
  419. else
  420. fmt->entry(fmt, hpp, he);
  421. /*
  422. * dynamic entries are right-aligned but we want left-aligned
  423. * in the hierarchy mode
  424. */
  425. printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
  426. }
  427. printed += putc('\n', fp);
  428. if (symbol_conf.use_callchain && he->leaf) {
  429. u64 total = hists__total_period(hists);
  430. printed += hist_entry_callchain__fprintf(he, total, 0, fp);
  431. goto out;
  432. }
  433. out:
  434. return printed;
  435. }
/*
 * Print one hist entry (and optionally its callchain) to @fp, using
 * @bf (@bfsz bytes) as the scratch line buffer.  @size caps the printed
 * width; 0 (or anything larger than the buffer) means the full buffer.
 * Hierarchy mode is delegated to hist_entry__hierarchy_fprintf().
 * Returns the number of characters printed.
 */
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	/* Clamp the printable width to the actual buffer size. */
	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		ret += hist_entry_callchain__fprintf(he, total_period, 0, fp);

	return ret;
}
  457. static int print_hierarchy_indent(const char *sep, int indent,
  458. const char *line, FILE *fp)
  459. {
  460. if (sep != NULL || indent < 2)
  461. return 0;
  462. return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
  463. }
/*
 * Print the header rows for hierarchy mode: the overhead column headers
 * followed by the sort-key headers joined with " / ", then a dotted
 * underline row sized to the widest sort-key combination.
 * Always returns 2 (the number of header rows printed).
 */
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	/* Find the widest sort-key level to size the trailing underline. */
	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++; /* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}
/*
 * Print header line number @line: each column's header text, separated
 * by field_sep (or a space).  While @span is non-zero — set by a header
 * that spans following columns — both the separator and the header text
 * are suppressed.
 */
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}
  555. static int
  556. hists__fprintf_standard_headers(struct hists *hists,
  557. struct perf_hpp *hpp,
  558. FILE *fp)
  559. {
  560. struct perf_hpp_list *hpp_list = hists->hpp_list;
  561. struct perf_hpp_fmt *fmt;
  562. unsigned int width;
  563. const char *sep = symbol_conf.field_sep;
  564. bool first = true;
  565. int line;
  566. for (line = 0; line < hpp_list->nr_header_lines; line++) {
  567. /* first # is displayed one level up */
  568. if (line)
  569. fprintf(fp, "# ");
  570. fprintf_line(hists, hpp, line, fp);
  571. fprintf(fp, "\n");
  572. }
  573. if (sep)
  574. return hpp_list->nr_header_lines;
  575. first = true;
  576. fprintf(fp, "# ");
  577. hists__for_each_format(hists, fmt) {
  578. unsigned int i;
  579. if (perf_hpp__should_skip(fmt, hists))
  580. continue;
  581. if (!first)
  582. fprintf(fp, "%s", sep ?: " ");
  583. else
  584. first = false;
  585. width = fmt->width(fmt, hpp, hists);
  586. for (i = 0; i < width; i++)
  587. fprintf(fp, ".");
  588. }
  589. fprintf(fp, "\n");
  590. fprintf(fp, "#\n");
  591. return hpp_list->nr_header_lines + 2;
  592. }
  593. int hists__fprintf_headers(struct hists *hists, FILE *fp)
  594. {
  595. char bf[1024];
  596. struct perf_hpp dummy_hpp = {
  597. .buf = bf,
  598. .size = sizeof(bf),
  599. };
  600. fprintf(fp, "# ");
  601. if (symbol_conf.report_hierarchy)
  602. return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
  603. else
  604. return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
  605. }
/*
 * Print all unfiltered entries of @hists that meet @min_pcnt to @fp,
 * preceded by the column headers when @show_header is set.  At most
 * @max_rows rows are printed (0 = unlimited); @max_cols caps the line
 * width passed to hist_entry__fprintf().  Returns characters printed.
 *
 * NOTE(review): on allocation failure this stores -1 into a size_t
 * return, so callers see (size_t)-1 — they cannot test `ret < 0`.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	/* + 3 + 1: presumably the "---" lead-in plus NUL — TODO confirm. */
	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		/* When very verbose, dump the thread's maps for unresolved entries. */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	/* Drop the shared "[...]" symbol allocated by init_rem_hits(). */
	zfree(&rem_sq_bracket);
	return ret;
}
  666. size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
  667. {
  668. int i;
  669. size_t ret = 0;
  670. for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
  671. const char *name;
  672. if (stats->nr_events[i] == 0)
  673. continue;
  674. name = perf_event__name(i);
  675. if (!strcmp(name, "UNKNOWN"))
  676. continue;
  677. ret += fprintf(fp, "%16s events: %10d\n", name,
  678. stats->nr_events[i]);
  679. }
  680. return ret;
  681. }