stat-shadow.c

#include <stdio.h>
#include <string.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"
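
/*
 * Shadow stats are kept per "context": each combination of the exclude_*
 * attribute bits below gets its own slot, so counts taken with different
 * exclusion settings (user only, kernel only, ...) are never mixed when
 * ratios are computed.
 */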
enum {
        CTX_BIT_USER    = 1 << 0,
        CTX_BIT_KERNEL  = 1 << 1,
        CTX_BIT_HV      = 1 << 2,
        CTX_BIT_HOST    = 1 << 3,
        CTX_BIT_IDLE    = 1 << 4,
        CTX_BIT_MAX     = 1 << 5,
};

#define NUM_CTX CTX_BIT_MAX

/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
static bool have_frontend_stalled;

struct stats walltime_nsecs_stats;

void perf_stat__init_shadow_stats(void)
{
        have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
}
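
/*
 * Map an event's exclude_* attribute bits to an index into the per-context
 * stats arrays above, so that e.g. a kernel-only cycles count and a
 * user-only cycles count are tracked (and later matched) separately.
 */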
static int evsel_context(struct perf_evsel *evsel)
{
        int ctx = 0;

        if (evsel->attr.exclude_kernel)
                ctx |= CTX_BIT_KERNEL;
        if (evsel->attr.exclude_user)
                ctx |= CTX_BIT_USER;
        if (evsel->attr.exclude_hv)
                ctx |= CTX_BIT_HV;
        if (evsel->attr.exclude_host)
                ctx |= CTX_BIT_HOST;
        if (evsel->attr.exclude_idle)
                ctx |= CTX_BIT_IDLE;

        return ctx;
}

void perf_stat__reset_shadow_stats(void)
{
        memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
        memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
        memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
        memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
        memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
        memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
        memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
        memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
        memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
        memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
        memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
        memset(runtime_cycles_in_tx_stats, 0,
               sizeof(runtime_cycles_in_tx_stats));
        memset(runtime_transaction_stats, 0,
               sizeof(runtime_transaction_stats));
        memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
        memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
                                    int cpu)
{
        int ctx = evsel_context(counter);

        if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
                update_stats(&runtime_nsecs_stats[cpu], count[0]);
        else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
                update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
                update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, TRANSACTION_START))
                update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, ELISION_START))
                update_stats(&runtime_elision_stats[ctx][cpu], count[0]);
        else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
                update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]);
        else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
                update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]);
        else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
                update_stats(&runtime_branches_stats[ctx][cpu], count[0]);
        else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
                update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]);
        else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
                update_stats(&runtime_l1_dcache_stats[ctx][cpu], count[0]);
        else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
                update_stats(&runtime_l1_icache_stats[ctx][cpu], count[0]);
        else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
                update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]);
        else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
                update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]);
        else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
                update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]);
}
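
/*
 * Illustrative usage sketch (not code from this file): the perf stat code
 * is expected to feed every counter read through the function above before
 * printing, e.g.:
 *
 *      u64 count[1] = { value_read };  // hypothetical counter value
 *      perf_stat__update_shadow_stats(counter, count, cpu);
 *
 * so that the denominators needed by the ratio printing below are available.
 */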

/* used for get_ratio_color() */
enum grc_type {
        GRC_STALLED_CYCLES_FE,
        GRC_STALLED_CYCLES_BE,
        GRC_CACHE_MISSES,
        GRC_MAX_NR
};
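
/*
 * Threshold table, in percent: above the first value the ratio is printed
 * in red, above the second in magenta, above the third in yellow; otherwise
 * the normal color is used.
 */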
static const char *get_ratio_color(enum grc_type type, double ratio)
{
        static const double grc_table[GRC_MAX_NR][3] = {
                [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
                [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
                [GRC_CACHE_MISSES]      = { 20.0, 10.0,  5.0 },
        };
        const char *color = PERF_COLOR_NORMAL;

        if (ratio > grc_table[type][0])
                color = PERF_COLOR_RED;
        else if (ratio > grc_table[type][1])
                color = PERF_COLOR_MAGENTA;
        else if (ratio > grc_table[type][2])
                color = PERF_COLOR_YELLOW;

        return color;
}
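
/*
 * The print_* helpers below each express one event (stalled cycles or
 * misses) as a percentage of its base counter for the same context and CPU,
 * and emit it through out->print_metric() with a color chosen by
 * get_ratio_color().
 */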
static void print_stalled_cycles_frontend(int cpu,
                                          struct perf_evsel *evsel, double avg,
                                          struct perf_stat_output_ctx *out)
{
        double total, ratio = 0.0;
        const char *color;
        int ctx = evsel_context(evsel);

        total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

        if (ratio)
                out->print_metric(out->ctx, color, "%7.2f%%", "frontend cycles idle",
                                  ratio);
        else
                out->print_metric(out->ctx, NULL, NULL, "frontend cycles idle", 0);
}

static void print_stalled_cycles_backend(int cpu,
                                         struct perf_evsel *evsel, double avg,
                                         struct perf_stat_output_ctx *out)
{
        double total, ratio = 0.0;
        const char *color;
        int ctx = evsel_context(evsel);

        total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

        out->print_metric(out->ctx, color, "%6.2f%%", "backend cycles idle", ratio);
}

static void print_branch_misses(int cpu,
                                struct perf_evsel *evsel,
                                double avg,
                                struct perf_stat_output_ctx *out)
{
        double total, ratio = 0.0;
        const char *color;
        int ctx = evsel_context(evsel);

        total = avg_stats(&runtime_branches_stats[ctx][cpu]);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);

        out->print_metric(out->ctx, color, "%7.2f%%", "of all branches", ratio);
}

static void print_l1_dcache_misses(int cpu,
                                   struct perf_evsel *evsel,
                                   double avg,
                                   struct perf_stat_output_ctx *out)
{
        double total, ratio = 0.0;
        const char *color;
        int ctx = evsel_context(evsel);

        total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);

        out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
}

static void print_l1_icache_misses(int cpu,
                                   struct perf_evsel *evsel,
                                   double avg,
                                   struct perf_stat_output_ctx *out)
{
        double total, ratio = 0.0;
        const char *color;
        int ctx = evsel_context(evsel);

        total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);

        out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
}

static void print_dtlb_cache_misses(int cpu,
                                    struct perf_evsel *evsel,
                                    double avg,
                                    struct perf_stat_output_ctx *out)
{
        double total, ratio = 0.0;
        const char *color;
        int ctx = evsel_context(evsel);

        total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);

        out->print_metric(out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
}

static void print_itlb_cache_misses(int cpu,
                                    struct perf_evsel *evsel,
                                    double avg,
                                    struct perf_stat_output_ctx *out)
{
        double total, ratio = 0.0;
        const char *color;
        int ctx = evsel_context(evsel);

        total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);

        out->print_metric(out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
}

static void print_ll_cache_misses(int cpu,
                                  struct perf_evsel *evsel,
                                  double avg,
                                  struct perf_stat_output_ctx *out)
{
        double total, ratio = 0.0;
        const char *color;
        int ctx = evsel_context(evsel);

        total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);

        out->print_metric(out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
}
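
/*
 * Print the derived ("shadow") metric for one event by dispatching on the
 * event type.  A minimal caller sketch (illustrative only; the field values
 * and callbacks here are hypothetical, the real call sites live elsewhere in
 * the perf stat code):
 *
 *      struct perf_stat_output_ctx out = {
 *              .ctx            = my_output_state,      // hypothetical
 *              .print_metric   = my_print_metric,      // hypothetical
 *              .new_line       = my_new_line,          // hypothetical
 *      };
 *      perf_stat__print_shadow_stats(evsel, avg, cpu, &out);
 */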
void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
                                   double avg, int cpu,
                                   struct perf_stat_output_ctx *out)
{
        void *ctxp = out->ctx;
        print_metric_t print_metric = out->print_metric;
        double total, ratio = 0.0, total2;
        int ctx = evsel_context(evsel);

        if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
                total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
                if (total) {
                        ratio = avg / total;
                        print_metric(ctxp, NULL, "%7.2f ",
                                     "insn per cycle", ratio);
                } else {
                        print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
                }
                total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
                total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));

                if (total && avg) {
                        out->new_line(ctxp);
                        ratio = total / avg;
                        print_metric(ctxp, NULL, "%7.2f ",
                                     "stalled cycles per insn",
                                     ratio);
                } else if (have_frontend_stalled) {
                        print_metric(ctxp, NULL, NULL,
                                     "stalled cycles per insn", 0);
                }
        } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
                if (runtime_branches_stats[ctx][cpu].n != 0)
                        print_branch_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all branches", 0);
        } else if (
                evsel->attr.type == PERF_TYPE_HW_CACHE &&
                evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                        ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
                if (runtime_l1_dcache_stats[ctx][cpu].n != 0)
                        print_l1_dcache_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
        } else if (
                evsel->attr.type == PERF_TYPE_HW_CACHE &&
                evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                        ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
                if (runtime_l1_icache_stats[ctx][cpu].n != 0)
                        print_l1_icache_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
        } else if (
                evsel->attr.type == PERF_TYPE_HW_CACHE &&
                evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                        ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
                if (runtime_dtlb_cache_stats[ctx][cpu].n != 0)
                        print_dtlb_cache_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
        } else if (
                evsel->attr.type == PERF_TYPE_HW_CACHE &&
                evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                        ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
                if (runtime_itlb_cache_stats[ctx][cpu].n != 0)
                        print_itlb_cache_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
        } else if (
                evsel->attr.type == PERF_TYPE_HW_CACHE &&
                evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                        ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
                if (runtime_ll_cache_stats[ctx][cpu].n != 0)
                        print_ll_cache_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
        } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
                total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);

                if (total)
                        ratio = avg * 100 / total;

                if (runtime_cacherefs_stats[ctx][cpu].n != 0)
                        print_metric(ctxp, NULL, "%8.3f %%",
                                     "of all cache refs", ratio);
                else
                        print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
        } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
                print_stalled_cycles_frontend(cpu, evsel, avg, out);
        } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
                print_stalled_cycles_backend(cpu, evsel, avg, out);
        } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
                total = avg_stats(&runtime_nsecs_stats[cpu]);

                if (total) {
                        ratio = avg / total;
                        print_metric(ctxp, NULL, "%8.3f", "GHz", ratio);
                } else {
                        print_metric(ctxp, NULL, NULL, "GHz", 0);
                }
        } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
                total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

                if (total)
                        print_metric(ctxp, NULL,
                                     "%7.2f%%", "transactional cycles",
                                     100.0 * (avg / total));
                else
                        print_metric(ctxp, NULL, NULL, "transactional cycles",
                                     0);
        } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
                total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
                total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

                if (total2 < avg)
                        total2 = avg;
                if (total)
                        print_metric(ctxp, NULL, "%7.2f%%", "aborted cycles",
                                     100.0 * ((total2 - avg) / total));
                else
                        print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
        } else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

                if (avg)
                        ratio = total / avg;

                if (runtime_cycles_in_tx_stats[ctx][cpu].n != 0)
                        print_metric(ctxp, NULL, "%8.0f",
                                     "cycles / transaction", ratio);
                else
                        print_metric(ctxp, NULL, NULL, "cycles / transaction",
                                     0);
        } else if (perf_stat_evsel__is(evsel, ELISION_START)) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

                if (avg)
                        ratio = total / avg;

                print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
        } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) {
                if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
                        print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
                                     avg / ratio);
                else
                        print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
        } else if (runtime_nsecs_stats[cpu].n != 0) {
                char unit = 'M';
                char unit_buf[10];

                total = avg_stats(&runtime_nsecs_stats[cpu]);

                if (total)
                        ratio = 1000.0 * avg / total;
                if (ratio < 0.001) {
                        ratio *= 1000;
                        unit = 'K';
                }
                snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
                print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
        } else {
                print_metric(ctxp, NULL, NULL, NULL, 0);
        }
}