#include <inttypes.h>
#include <math.h>
#include <linux/compiler.h>

#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)                   \
({                                                              \
        int __ret = fn(hpp, fmt, ##__VA_ARGS__);                \
        advance_hpp(hpp, __ret);                                \
        __ret;                                                  \
})

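/*
 * Print one field of a hist entry, either as a percentage of the total
 * period or as a raw value.  For event groups, the same field is also
 * printed for every group member, zero-filling members that have no
 * sample so the columns stay aligned.
 */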
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
                      hpp_field_fn get_field, const char *fmt, int len,
                      hpp_snprint_fn print_fn, bool fmt_percent)
{
        int ret;
        struct hists *hists = he->hists;
        struct perf_evsel *evsel = hists_to_evsel(hists);
        char *buf = hpp->buf;
        size_t size = hpp->size;

        if (fmt_percent) {
                double percent = 0.0;
                u64 total = hists__total_period(hists);

                if (total)
                        percent = 100.0 * get_field(he) / total;

                ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
        } else
                ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

        if (perf_evsel__is_group_event(evsel)) {
                int prev_idx, idx_delta;
                struct hist_entry *pair;
                int nr_members = evsel->nr_members;

                prev_idx = perf_evsel__group_idx(evsel);

                list_for_each_entry(pair, &he->pairs.head, pairs.node) {
                        u64 period = get_field(pair);
                        u64 total = hists__total_period(pair->hists);

                        if (!total)
                                continue;

                        evsel = hists_to_evsel(pair->hists);
                        idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

                        while (idx_delta--) {
                                /*
                                 * zero-fill group members in the middle which
                                 * have no sample
                                 */
                                if (fmt_percent) {
                                        ret += hpp__call_print_fn(hpp, print_fn,
                                                                  fmt, len, 0.0);
                                } else {
                                        ret += hpp__call_print_fn(hpp, print_fn,
                                                                  fmt, len, 0ULL);
                                }
                        }

                        if (fmt_percent) {
                                ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
                                                          100.0 * period / total);
                        } else {
                                ret += hpp__call_print_fn(hpp, print_fn, fmt,
                                                          len, period);
                        }

                        prev_idx = perf_evsel__group_idx(evsel);
                }

                idx_delta = nr_members - prev_idx - 1;

                while (idx_delta--) {
                        /*
                         * zero-fill group members at last which have no sample
                         */
                        if (fmt_percent) {
                                ret += hpp__call_print_fn(hpp, print_fn,
                                                          fmt, len, 0.0);
                        } else {
                                ret += hpp__call_print_fn(hpp, print_fn,
                                                          fmt, len, 0ULL);
                        }
                }
        }

        /*
         * Restore original buf and size as it's where caller expects
         * the result will be saved.
         */
        hpp->buf = buf;
        hpp->size = size;
        return ret;
}

int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
             struct hist_entry *he, hpp_field_fn get_field,
             const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
        int len = fmt->user_len ?: fmt->len;

        if (symbol_conf.field_sep) {
                return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
                                  print_fn, fmt_percent);
        }

        if (fmt_percent)
                len -= 2; /* 2 for a space and a % sign */
        else
                len -= 1;

        return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                 struct hist_entry *he, hpp_field_fn get_field,
                 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
        if (!symbol_conf.cumulate_callchain) {
                int len = fmt->user_len ?: fmt->len;
                return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
        }

        return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
        if (field_a > field_b)
                return 1;
        if (field_a < field_b)
                return -1;
        return 0;
}

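/*
 * Compare two entries by the given field.  If they are equal and event
 * grouping is enabled, break the tie by comparing the group members'
 * fields in index order (index 0 is the leader, already compared above).
 */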
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
                       hpp_field_fn get_field)
{
        s64 ret;
        int i, nr_members;
        struct perf_evsel *evsel;
        struct hist_entry *pair;
        u64 *fields_a, *fields_b;

        ret = field_cmp(get_field(a), get_field(b));
        if (ret || !symbol_conf.event_group)
                return ret;

        evsel = hists_to_evsel(a->hists);
        if (!perf_evsel__is_group_event(evsel))
                return ret;

        nr_members = evsel->nr_members;
        fields_a = calloc(nr_members, sizeof(*fields_a));
        fields_b = calloc(nr_members, sizeof(*fields_b));

        if (!fields_a || !fields_b)
                goto out;

        list_for_each_entry(pair, &a->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
        }

        list_for_each_entry(pair, &b->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
        }

        for (i = 1; i < nr_members; i++) {
                ret = field_cmp(fields_a[i], fields_b[i]);
                if (ret)
                        break;
        }

out:
        free(fields_a);
        free(fields_b);

        return ret;
}

static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
                           hpp_field_fn get_field)
{
        s64 ret = 0;

        if (symbol_conf.cumulate_callchain) {
                /*
                 * Put caller above callee when they have equal period.
                 */
                ret = field_cmp(get_field(a), get_field(b));
                if (ret)
                        return ret;

                if (a->thread != b->thread || !symbol_conf.use_callchain)
                        return 0;

                ret = b->callchain->max_depth - a->callchain->max_depth;
                if (callchain_param.order == ORDER_CALLER)
                        ret = -ret;
        }
        return ret;
}

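/*
 * Default column width: the user-specified width if any, otherwise the
 * format's own width, widened to cover all group members and never
 * narrower than the column header.
 */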
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
                         struct perf_hpp *hpp __maybe_unused,
                         struct hists *hists)
{
        int len = fmt->user_len ?: fmt->len;
        struct perf_evsel *evsel = hists_to_evsel(hists);

        if (symbol_conf.event_group)
                len = max(len, evsel->nr_members * fmt->len);

        if (len < (int)strlen(fmt->name))
                len = strlen(fmt->name);

        return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                          struct hists *hists, int line __maybe_unused,
                          int *span __maybe_unused)
{
        int len = hpp__width_fn(fmt, hpp, hists);
        return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
        va_list args;
        ssize_t ssize = hpp->size;
        double percent;
        int ret, len;

        va_start(args, fmt);
        len = va_arg(args, int);
        percent = va_arg(args, double);
        ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
        va_end(args);

        return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
        va_list args;
        ssize_t ssize = hpp->size;
        int ret;

        va_start(args, fmt);
        ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
        va_end(args);

        return (ret >= ssize) ? (ssize - 1) : ret;
}

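/*
 * The macros below generate, per column type, the field accessor plus
 * the ->color, ->entry and ->sort callbacks used by struct perf_hpp_fmt:
 * percentages of the total period, accumulated (children) percentages,
 * and raw values.
 */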
#define __HPP_COLOR_PERCENT_FN(_type, _field)                                  \
static u64 he_get_##_field(struct hist_entry *he)                              \
{                                                                              \
        return he->stat._field;                                                \
}                                                                              \
                                                                               \
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,                        \
                              struct perf_hpp *hpp, struct hist_entry *he)     \
{                                                                              \
        return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",             \
                        hpp_color_scnprintf, true);                            \
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)                                  \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,                        \
                              struct perf_hpp *hpp, struct hist_entry *he)     \
{                                                                              \
        return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",             \
                        hpp_entry_scnprintf, true);                            \
}

#define __HPP_SORT_FN(_type, _field)                                           \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,      \
                                 struct hist_entry *a, struct hist_entry *b)   \
{                                                                              \
        return __hpp__sort(a, b, he_get_##_field);                             \
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)                              \
static u64 he_get_acc_##_field(struct hist_entry *he)                          \
{                                                                              \
        return he->stat_acc->_field;                                           \
}                                                                              \
                                                                               \
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,                        \
                              struct perf_hpp *hpp, struct hist_entry *he)     \
{                                                                              \
        return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",     \
                            hpp_color_scnprintf, true);                        \
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)                              \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,                        \
                              struct perf_hpp *hpp, struct hist_entry *he)     \
{                                                                              \
        return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",     \
                            hpp_entry_scnprintf, true);                        \
}

#define __HPP_SORT_ACC_FN(_type, _field)                                       \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,      \
                                 struct hist_entry *a, struct hist_entry *b)   \
{                                                                              \
        return __hpp__sort_acc(a, b, he_get_acc_##_field);                     \
}

#define __HPP_ENTRY_RAW_FN(_type, _field)                                      \
static u64 he_get_raw_##_field(struct hist_entry *he)                          \
{                                                                              \
        return he->stat._field;                                                \
}                                                                              \
                                                                               \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,                        \
                              struct perf_hpp *hpp, struct hist_entry *he)     \
{                                                                              \
        return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,        \
                        hpp_entry_scnprintf, false);                           \
}

#define __HPP_SORT_RAW_FN(_type, _field)                                       \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,      \
                                 struct hist_entry *a, struct hist_entry *b)   \
{                                                                              \
        return __hpp__sort(a, b, he_get_raw_##_field);                         \
}

#define HPP_PERCENT_FNS(_type, _field)                                         \
__HPP_COLOR_PERCENT_FN(_type, _field)                                          \
__HPP_ENTRY_PERCENT_FN(_type, _field)                                          \
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)                                     \
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)                                      \
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)                                      \
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)                                             \
__HPP_ENTRY_RAW_FN(_type, _field)                                              \
__HPP_SORT_RAW_FN(_type, _field)

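/*
 * For example, HPP_RAW_FNS(samples, nr_events) below expands to
 * he_get_raw_nr_events(), hpp__entry_samples() and hpp__sort_samples(),
 * which are wired up in perf_hpp__format[] as the "Samples" column.
 */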
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
                            struct hist_entry *a __maybe_unused,
                            struct hist_entry *b __maybe_unused)
{
        return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
        return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
        if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
                return false;

        return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)          \
        {                                               \
                .name     = _name,                      \
                .header   = hpp__header_fn,             \
                .width    = hpp__width_fn,              \
                .color    = hpp__color_ ## _fn,         \
                .entry    = hpp__entry_ ## _fn,         \
                .cmp      = hpp__nop_cmp,               \
                .collapse = hpp__nop_cmp,               \
                .sort     = hpp__sort_ ## _fn,          \
                .idx      = PERF_HPP__ ## _idx,         \
                .equal    = hpp__equal,                 \
        }

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)      \
        {                                               \
                .name     = _name,                      \
                .header   = hpp__header_fn,             \
                .width    = hpp__width_fn,              \
                .color    = hpp__color_ ## _fn,         \
                .entry    = hpp__entry_ ## _fn,         \
                .cmp      = hpp__nop_cmp,               \
                .collapse = hpp__nop_cmp,               \
                .sort     = hpp__sort_ ## _fn,          \
                .idx      = PERF_HPP__ ## _idx,         \
                .equal    = hpp__equal,                 \
        }

#define HPP__PRINT_FNS(_name, _fn, _idx)                \
        {                                               \
                .name     = _name,                      \
                .header   = hpp__header_fn,             \
                .width    = hpp__width_fn,              \
                .entry    = hpp__entry_ ## _fn,         \
                .cmp      = hpp__nop_cmp,               \
                .collapse = hpp__nop_cmp,               \
                .sort     = hpp__sort_ ## _fn,          \
                .idx      = PERF_HPP__ ## _idx,         \
                .equal    = hpp__equal,                 \
        }

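/* Built-in columns, indexed by the PERF_HPP__* enum (see ->idx). */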
struct perf_hpp_fmt perf_hpp__format[] = {
        HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
        HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
        HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
        HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
        HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
        HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
        HPP__PRINT_FNS("Samples", samples, SAMPLES),
        HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
        .fields = LIST_HEAD_INIT(perf_hpp_list.fields),
        .sorts  = LIST_HEAD_INIT(perf_hpp_list.sorts),
        .nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

void perf_hpp__init(void)
{
        int i;

        for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
                struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

                INIT_LIST_HEAD(&fmt->list);

                /* sort_list may be linked by setup_sorting() */
                if (fmt->sort_list.next == NULL)
                        INIT_LIST_HEAD(&fmt->sort_list);
        }

        /*
         * If user specified field order, no need to setup default fields.
         */
        if (is_strict_order(field_order))
                return;

        if (symbol_conf.cumulate_callchain) {
                hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
                perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
        }

        hpp_dimension__add_output(PERF_HPP__OVERHEAD);

        if (symbol_conf.show_cpu_utilization) {
                hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
                hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

                if (perf_guest) {
                        hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
                        hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
                }
        }

        if (symbol_conf.show_nr_samples)
                hpp_dimension__add_output(PERF_HPP__SAMPLES);

        if (symbol_conf.show_total_period)
                hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
                                    struct perf_hpp_fmt *format)
{
        list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
                                        struct perf_hpp_fmt *format)
{
        list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
                                       struct perf_hpp_fmt *format)
{
        list_add(&format->sort_list, &list->sorts);
}

void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
        list_del(&format->list);
}

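/*
 * Undo the effect of cumulate_callchain: drop the "Children" column
 * from the output list and rename the "Self" column back to "Overhead".
 */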
void perf_hpp__cancel_cumulate(void)
{
        struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

        if (is_strict_order(field_order))
                return;

        ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
        acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

        perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
                if (acc->equal(acc, fmt)) {
                        perf_hpp__column_unregister(fmt);
                        continue;
                }

                if (ovh->equal(ovh, fmt))
                        fmt->name = "Overhead";
        }
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
        return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
        struct perf_hpp_fmt *fmt;

        /* append sort keys to output field */
        perf_hpp_list__for_each_sort_list(list, fmt) {
                struct perf_hpp_fmt *pos;

                /* skip sort-only fields ("sort_compute" in perf diff) */
                if (!fmt->entry && !fmt->color)
                        continue;

                perf_hpp_list__for_each_format(list, pos) {
                        if (fmt_equal(fmt, pos))
                                goto next;
                }

                perf_hpp__column_register(fmt);
next:
                continue;
        }
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
        struct perf_hpp_fmt *fmt;

        /* append output fields to sort keys */
        perf_hpp_list__for_each_format(list, fmt) {
                struct perf_hpp_fmt *pos;

                perf_hpp_list__for_each_sort_list(list, pos) {
                        if (fmt_equal(fmt, pos))
                                goto next;
                }

                perf_hpp__register_sort_field(fmt);
next:
                continue;
        }
}

static void fmt_free(struct perf_hpp_fmt *fmt)
{
        if (fmt->free)
                fmt->free(fmt);
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
        struct perf_hpp_fmt *fmt, *tmp;

        /* reset output fields */
        perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
                list_del_init(&fmt->list);
                list_del_init(&fmt->sort_list);
                fmt_free(fmt);
        }

        /* reset sort keys */
        perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
                list_del_init(&fmt->list);
                list_del_init(&fmt->sort_list);
                fmt_free(fmt);
        }
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
        struct perf_hpp_fmt *fmt;
        int ret = 0;
        bool first = true;
        struct perf_hpp dummy_hpp;

        hists__for_each_format(hists, fmt) {
                if (perf_hpp__should_skip(fmt, hists))
                        continue;

                if (first)
                        first = false;
                else
                        ret += 2;

                ret += fmt->width(fmt, &dummy_hpp, hists);
        }

        if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
                ret += 3 + BITS_PER_LONG / 4;

        return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
        struct perf_hpp_fmt *fmt;
        int ret = 0;
        bool first = true;
        struct perf_hpp dummy_hpp;

        hists__for_each_format(hists, fmt) {
                if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
                        break;

                if (first)
                        first = false;
                else
                        ret += 2;

                ret += fmt->width(fmt, &dummy_hpp, hists);
        }

        return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
        if (perf_hpp__is_sort_entry(fmt))
                return perf_hpp__reset_sort_width(fmt, hists);

        if (perf_hpp__is_dynamic_entry(fmt))
                return;

        BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

        switch (fmt->idx) {
        case PERF_HPP__OVERHEAD:
        case PERF_HPP__OVERHEAD_SYS:
        case PERF_HPP__OVERHEAD_US:
        case PERF_HPP__OVERHEAD_ACC:
                fmt->len = 8;
                break;

        case PERF_HPP__OVERHEAD_GUEST_SYS:
        case PERF_HPP__OVERHEAD_GUEST_US:
                fmt->len = 9;
                break;

        case PERF_HPP__SAMPLES:
        case PERF_HPP__PERIOD:
                fmt->len = 12;
                break;

        default:
                break;
        }
}

void hists__reset_column_width(struct hists *hists)
{
        struct perf_hpp_fmt *fmt;
        struct perf_hpp_list_node *node;

        hists__for_each_format(hists, fmt)
                perf_hpp__reset_width(fmt, hists);

        /* hierarchy entries have their own hpp list */
        list_for_each_entry(node, &hists->hpp_formats, list) {
                perf_hpp_list__for_each_format(&node->hpp, fmt)
                        perf_hpp__reset_width(fmt, hists);
        }
}

void perf_hpp__set_user_width(const char *width_list_str)
{
        struct perf_hpp_fmt *fmt;
        const char *ptr = width_list_str;

        perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                char *p;

                int len = strtol(ptr, &p, 10);
                fmt->user_len = len;

                if (*p == ',')
                        ptr = p + 1;
                else
                        break;
        }
}

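/*
 * Add a copy of @fmt to the hpp list of the node matching its hierarchy
 * level, allocating the per-level node on first use.
 */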
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
        struct perf_hpp_list_node *node = NULL;
        struct perf_hpp_fmt *fmt_copy;
        bool found = false;
        bool skip = perf_hpp__should_skip(fmt, hists);

        list_for_each_entry(node, &hists->hpp_formats, list) {
                if (node->level == fmt->level) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                node = malloc(sizeof(*node));
                if (node == NULL)
                        return -1;

                node->skip = skip;
                node->level = fmt->level;
                perf_hpp_list__init(&node->hpp);

                hists->nr_hpp_node++;
                list_add_tail(&node->list, &hists->hpp_formats);
        }

        fmt_copy = perf_hpp_fmt__dup(fmt);
        if (fmt_copy == NULL)
                return -1;

        if (!skip)
                node->skip = false;

        list_add_tail(&fmt_copy->list, &node->hpp.fields);
        list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

        return 0;
}

int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
                                  struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        struct perf_hpp_fmt *fmt;
        struct hists *hists;
        int ret;

        if (!symbol_conf.report_hierarchy)
                return 0;

        evlist__for_each_entry(evlist, evsel) {
                hists = evsel__hists(evsel);

                perf_hpp_list__for_each_sort_list(list, fmt) {
                        if (perf_hpp__is_dynamic_entry(fmt) &&
                            !perf_hpp__defined_dynamic_entry(fmt, hists))
                                continue;

                        ret = add_hierarchy_fmt(hists, fmt);
                        if (ret < 0)
                                return ret;
                }
        }

        return 0;
}