/*
 * hist.c - histogram column ("hpp": hist period print) formatting and
 * column/sort-key management for perf report/top UIs.
 */
  1. #include <math.h>
  2. #include <linux/compiler.h>
  3. #include "../util/hist.h"
  4. #include "../util/util.h"
  5. #include "../util/sort.h"
  6. #include "../util/evsel.h"
/* hist period print (hpp) functions */

/*
 * Invoke a print callback and advance the hpp buffer pointer/size by the
 * number of bytes it wrote, so successive prints append to the same buffer.
 * Evaluates to the callback's return value (bytes printed).
 */
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})
/*
 * Format one hist_entry value into hpp->buf.  The value is extracted with
 * get_field() and rendered by print_fn; when fmt_percent is true it is
 * printed as a percentage of the hists' total period, otherwise as a raw
 * u64.
 *
 * For event groups, one column is printed per group member: values for
 * the other members come from the entry's pair list (indexed by group
 * position), and members without a sample are zero-filled.
 *
 * Returns the total number of bytes printed.  hpp->buf/hpp->size are
 * restored before returning, so the caller sees the result at the
 * original buffer position.
 */
int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
	       hpp_field_fn get_field, const char *fmt,
	       hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		/* guard against division by zero when there are no samples */
		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		/* the leader's own column was printed above */
		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, 0ULL);
			}
		}
	}

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;
	return ret;
}
  87. int __hpp__fmt_acc(struct perf_hpp *hpp, struct hist_entry *he,
  88. hpp_field_fn get_field, const char *fmt,
  89. hpp_snprint_fn print_fn, bool fmt_percent)
  90. {
  91. if (!symbol_conf.cumulate_callchain) {
  92. return snprintf(hpp->buf, hpp->size, "%*s",
  93. fmt_percent ? 8 : 12, "N/A");
  94. }
  95. return __hpp__fmt(hpp, he, get_field, fmt, print_fn, fmt_percent);
  96. }
  97. static int field_cmp(u64 field_a, u64 field_b)
  98. {
  99. if (field_a > field_b)
  100. return 1;
  101. if (field_a < field_b)
  102. return -1;
  103. return 0;
  104. }
  105. static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
  106. hpp_field_fn get_field)
  107. {
  108. s64 ret;
  109. int i, nr_members;
  110. struct perf_evsel *evsel;
  111. struct hist_entry *pair;
  112. u64 *fields_a, *fields_b;
  113. ret = field_cmp(get_field(a), get_field(b));
  114. if (ret || !symbol_conf.event_group)
  115. return ret;
  116. evsel = hists_to_evsel(a->hists);
  117. if (!perf_evsel__is_group_event(evsel))
  118. return ret;
  119. nr_members = evsel->nr_members;
  120. fields_a = calloc(sizeof(*fields_a), nr_members);
  121. fields_b = calloc(sizeof(*fields_b), nr_members);
  122. if (!fields_a || !fields_b)
  123. goto out;
  124. list_for_each_entry(pair, &a->pairs.head, pairs.node) {
  125. evsel = hists_to_evsel(pair->hists);
  126. fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
  127. }
  128. list_for_each_entry(pair, &b->pairs.head, pairs.node) {
  129. evsel = hists_to_evsel(pair->hists);
  130. fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
  131. }
  132. for (i = 1; i < nr_members; i++) {
  133. ret = field_cmp(fields_a[i], fields_b[i]);
  134. if (ret)
  135. break;
  136. }
  137. out:
  138. free(fields_a);
  139. free(fields_b);
  140. return ret;
  141. }
  142. static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
  143. hpp_field_fn get_field)
  144. {
  145. s64 ret = 0;
  146. if (symbol_conf.cumulate_callchain) {
  147. /*
  148. * Put caller above callee when they have equal period.
  149. */
  150. ret = field_cmp(get_field(a), get_field(b));
  151. if (ret)
  152. return ret;
  153. ret = b->callchain->max_depth - a->callchain->max_depth;
  154. }
  155. return ret;
  156. }
/*
 * Generate hpp__header_<type>(): prints the column header string,
 * widening the column to fit all group members when event grouping
 * is enabled.
 */
#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) 		\
static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
			       struct perf_hpp *hpp,			\
			       struct perf_evsel *evsel)		\
{									\
	int len = _min_width;						\
									\
	if (symbol_conf.event_group)					\
		len = max(len, evsel->nr_members * _unit_width);	\
									\
	return scnprintf(hpp->buf, hpp->size, "%*s", len, _str);	\
}
/*
 * Generate hpp__width_<type>(): returns the column width, using the
 * same group-aware sizing rule as the header function above it.
 */
#define __HPP_WIDTH_FN(_type, _min_width, _unit_width) 			\
static int hpp__width_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
			      struct perf_hpp *hpp __maybe_unused,	\
			      struct perf_evsel *evsel)			\
{									\
	int len = _min_width;						\
									\
	if (symbol_conf.event_group)					\
		len = max(len, evsel->nr_members * _unit_width);	\
									\
	return len;							\
}
/*
 * hpp_snprint_fn callback for colored percentage columns: extracts the
 * percent value from the varargs and renders it with value-dependent
 * color.  The return value is clamped to the buffer size, like
 * scnprintf().
 *
 * NOTE(review): assumes the single variadic argument is a double — true
 * for all fmt_percent callers in __hpp__fmt; confirm if reused elsewhere.
 */
static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret;

	va_start(args, fmt);
	percent = va_arg(args, double);
	ret = value_color_snprintf(hpp->buf, hpp->size, fmt, percent);
	va_end(args);

	/* clamp: snprintf-style functions return the would-be length */
	return (ret >= ssize) ? (ssize - 1) : ret;
}
  193. static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
  194. {
  195. va_list args;
  196. ssize_t ssize = hpp->size;
  197. int ret;
  198. va_start(args, fmt);
  199. ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
  200. va_end(args);
  201. return (ret >= ssize) ? (ssize - 1) : ret;
  202. }
/*
 * Generate he_get_<field>() (extracts he->stat.<field>) and
 * hpp__color_<type>() (prints it as a colored percentage).
 */
#define __HPP_COLOR_PERCENT_FN(_type, _field)				\
static u64 he_get_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%",		\
			  hpp_color_scnprintf, true);			\
}
/*
 * Generate hpp__entry_<type>(): uncolored percentage output; drops the
 * fixed width and '%' sign when a field separator is set (script mode).
 */
#define __HPP_ENTRY_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused,	\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%";	\
	return __hpp__fmt(hpp, he, he_get_##_field, fmt,		\
			  hpp_entry_scnprintf, true);			\
}
/* Generate hpp__sort_<type>(): comparator over he->stat.<field>. */
#define __HPP_SORT_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_##_field);			\
}
/*
 * Accumulated-stat variants: he_get_acc_<field>() reads from
 * he->stat_acc instead of he->stat, and printing goes through
 * __hpp__fmt_acc() so "N/A" is shown when accumulation is off.
 */
#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)			\
static u64 he_get_acc_##_field(struct hist_entry *he)			\
{									\
	return he->stat_acc->_field;					\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, " %6.2f%%",	\
			      hpp_color_scnprintf, true);		\
}
/* Uncolored entry output for accumulated percentage columns. */
#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)			\
static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused,	\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%";	\
	return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, fmt,	\
			      hpp_entry_scnprintf, true);		\
}
/* Generate hpp__sort_<type>(): comparator over accumulated stats. */
#define __HPP_SORT_ACC_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);		\
}
/*
 * Raw-value columns (e.g. Samples, Period): printed as unsigned decimal,
 * fixed-width unless a field separator is set.
 */
#define __HPP_ENTRY_RAW_FN(_type, _field)				\
static u64 he_get_raw_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused,	\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \
	return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt,		\
			  hpp_entry_scnprintf, false);			\
}
/* Generate hpp__sort_<type>(): comparator over raw stat fields. */
#define __HPP_SORT_RAW_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_raw_##_field);			\
}
/*
 * Bundle macros: one invocation emits the full callback set (header,
 * width, color/entry printers, sort comparator) for a column type.
 */

/* percentage column over he->stat.<_field> */
#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width)	\
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width)			\
__HPP_WIDTH_FN(_type, _min_width, _unit_width)				\
__HPP_COLOR_PERCENT_FN(_type, _field)					\
__HPP_ENTRY_PERCENT_FN(_type, _field)					\
__HPP_SORT_FN(_type, _field)

/* percentage column over accumulated he->stat_acc->_field */
#define HPP_PERCENT_ACC_FNS(_type, _str, _field, _min_width, _unit_width)\
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width)			\
__HPP_WIDTH_FN(_type, _min_width, _unit_width)				\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
__HPP_SORT_ACC_FN(_type, _field)

/* raw decimal column over he->stat.<_field> (no color printer) */
#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width)	\
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width)			\
__HPP_WIDTH_FN(_type, _min_width, _unit_width)				\
__HPP_ENTRY_RAW_FN(_type, _field)					\
__HPP_SORT_RAW_FN(_type, _field)
/*
 * Alternate "Self" header for the Overhead column; swapped in by
 * perf_hpp__init() when callchain accumulation (--children) is active.
 */
__HPP_HEADER_FN(overhead_self, "Self", 8, 8)

/* Instantiate header/width/printer/sort callbacks for every column. */
HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8)
HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8)
HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8)
HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8)
HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8)
HPP_PERCENT_ACC_FNS(overhead_acc, "Children", period, 8, 8)
HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12)
HPP_RAW_FNS(period, "Period", period, 12, 12)
/*
 * No-op comparator used for the .cmp/.collapse slots of hpp columns:
 * always reports "equal" so entry collapsing is driven by the sort
 * entries, not by these columns.
 */
static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}
/*
 * Struct-initializer macros wiring the generated callbacks into a
 * perf_hpp_fmt entry.  The three variants differ only in which printer
 * set they reference; raw columns (HPP__PRINT_FNS) have no .color.
 */
#define HPP__COLOR_PRINT_FNS(_name)			\
	{						\
		.header	= hpp__header_ ## _name,	\
		.width	= hpp__width_ ## _name,		\
		.color	= hpp__color_ ## _name,		\
		.entry	= hpp__entry_ ## _name,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _name,		\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name)			\
	{						\
		.header	= hpp__header_ ## _name,	\
		.width	= hpp__width_ ## _name,		\
		.color	= hpp__color_ ## _name,		\
		.entry	= hpp__entry_ ## _name,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _name,		\
	}

#define HPP__PRINT_FNS(_name)				\
	{						\
		.header	= hpp__header_ ## _name,	\
		.width	= hpp__width_ ## _name,		\
		.entry	= hpp__entry_ ## _name,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _name,		\
	}
/*
 * Static table of all hpp columns; indexed by the PERF_HPP__* enum
 * (see the perf_hpp__column_enable() users below).
 */
struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS(overhead),
	HPP__COLOR_PRINT_FNS(overhead_sys),
	HPP__COLOR_PRINT_FNS(overhead_us),
	HPP__COLOR_PRINT_FNS(overhead_guest_sys),
	HPP__COLOR_PRINT_FNS(overhead_guest_us),
	HPP__COLOR_ACC_PRINT_FNS(overhead_acc),
	HPP__PRINT_FNS(samples),
	HPP__PRINT_FNS(period)
};

/* active output columns and active sort keys, in display order */
LIST_HEAD(perf_hpp__list);
LIST_HEAD(perf_hpp__sort_list);
/* The generator macros were only needed to build the table above. */
#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
/*
 * Initialize the hpp column table and enable the default set of output
 * columns based on the current symbol_conf settings.  Does nothing
 * beyond list initialization when the user gave an explicit field order.
 */
void perf_hpp__init(void)
{
	struct list_head *list;
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (field_order)
		return;

	if (symbol_conf.cumulate_callchain) {
		/* with --children, Overhead is relabeled "Self" */
		perf_hpp__column_enable(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].header =
						hpp__header_overhead_self;
	}

	perf_hpp__column_enable(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS);
		perf_hpp__column_enable(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS);
			perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		perf_hpp__column_enable(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		perf_hpp__column_enable(PERF_HPP__PERIOD);

	/* prepend overhead field for backward compatibility. */
	list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list;
	if (list_empty(list))
		list_add(list, &perf_hpp__sort_list);

	if (symbol_conf.cumulate_callchain) {
		list = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC].sort_list;
		if (list_empty(list))
			list_add(list, &perf_hpp__sort_list);
	}
}
/* Append a format to the list of active output columns. */
void perf_hpp__column_register(struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &perf_hpp__list);
}
/* Remove a format from the active output columns. */
void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del(&format->list);
}
/* Append a format to the list of active sort keys. */
void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &perf_hpp__sort_list);
}
/* Enable a built-in column by its PERF_HPP__* index. */
void perf_hpp__column_enable(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	perf_hpp__column_register(&perf_hpp__format[col]);
}
/* Disable a built-in column by its PERF_HPP__* index. */
void perf_hpp__column_disable(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	perf_hpp__column_unregister(&perf_hpp__format[col]);
}
/*
 * Undo the --children setup from perf_hpp__init(): drop the Children
 * column and restore the "Overhead" header.  No-op when the user chose
 * an explicit field order.
 */
void perf_hpp__cancel_cumulate(void)
{
	if (field_order)
		return;

	perf_hpp__column_disable(PERF_HPP__OVERHEAD_ACC);
	perf_hpp__format[PERF_HPP__OVERHEAD].header = hpp__header_overhead;
}
/*
 * Make every active sort key also appear as an output column, unless an
 * equivalent column is already present.
 */
void perf_hpp__setup_output_field(void)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp__for_each_sort_list(fmt) {
		/* already part of the output list */
		if (!list_empty(&fmt->list))
			continue;

		/*
		 * sort entry fields are dynamically created,
		 * so they can share a same sort key even though
		 * the list is empty.
		 */
		if (perf_hpp__is_sort_entry(fmt)) {
			struct perf_hpp_fmt *pos;

			perf_hpp__for_each_format(pos) {
				if (perf_hpp__same_sort_entry(pos, fmt))
					goto next;
			}
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}
/*
 * Mirror of perf_hpp__setup_output_field(): make every output column
 * also act as a sort key, unless an equivalent sort key already exists.
 */
void perf_hpp__append_sort_keys(void)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp__for_each_format(fmt) {
		/* already part of the sort list */
		if (!list_empty(&fmt->sort_list))
			continue;

		/*
		 * sort entry fields are dynamically created,
		 * so they can share a same sort key even though
		 * the list is empty.
		 */
		if (perf_hpp__is_sort_entry(fmt)) {
			struct perf_hpp_fmt *pos;

			perf_hpp__for_each_sort_list(pos) {
				if (perf_hpp__same_sort_entry(pos, fmt))
					goto next;
			}
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}
/*
 * Unlink every format from both the output-column and sort-key lists,
 * leaving each node re-initialized (list_del_init) so it can be linked
 * again later.
 */
void perf_hpp__reset_output_field(void)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp__for_each_format_safe(fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
	}

	/* reset sort keys */
	perf_hpp__for_each_sort_list_safe(fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
	}
}
/*
 * See hists__fprintf to match the column widths
 */
/*
 * Total character width of all active columns, including the two-space
 * gap between columns and, in verbose mode with a symbol sort key, the
 * extra room for address + origin markers.
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	/*
	 * NOTE(review): dummy_hpp is passed uninitialized; the generated
	 * width callbacks ignore it (__maybe_unused), but confirm any
	 * other .width implementations do too.
	 */
	struct perf_hpp dummy_hpp;

	perf_hpp__for_each_format(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		if (first)
			first = false;
		else
			ret += 2;	/* inter-column gap */

		ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
	}

	if (verbose && sort__has_sym)	/* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}