builtin-top.c

/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *               2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"
#include "perf.h"

#include "util/annotate.h"
#include "util/config.h"
#include "util/color.h"
#include "util/drv_configs.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/event.h"
#include "util/machine.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include <linux/rbtree.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
#include "util/term.h"
#include "util/intlist.h"
#include "util/parse-branch-options.h"
#include "arch/common.h"
#include "util/debug.h"

#include <assert.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/mman.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/types.h>

#include "sane_ctype.h"

static volatile int done;
static volatile int resize;

#define HEADER_LINE_NR 5

static void perf_top__update_print_entries(struct perf_top *top)
{
	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}

static void winch_sig(int sig __maybe_unused)
{
	resize = 1;
}

static void perf_top__resize(struct perf_top *top)
{
	get_term_dimensions(&top->winsize);
	perf_top__update_print_entries(top);
}

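/*
 * Prepare annotation state for the symbol picked with the 's' key: take the
 * annotation lock, allocate the per-address histogram if needed, disassemble
 * the symbol and remember it as the current sym_filter_entry.
 */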
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
	struct perf_evsel *evsel;
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	int err = -1;

	if (!he || !he->ms.sym)
		return -1;

	evsel = hists_to_evsel(he->hists);
	sym = he->ms.sym;
	map = he->ms.map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(map->dso)) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
		return -1;
	}

	notes = symbol__annotation(sym);
	if (notes->src != NULL) {
		pthread_mutex_lock(&notes->lock);
		goto out_assign;
	}

	pthread_mutex_lock(&notes->lock);

	if (symbol__alloc_hist(sym) < 0) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return err;
	}

	err = symbol__annotate(sym, map, evsel, 0, NULL);
	if (err == 0) {
out_assign:
		top->sym_filter_entry = he;
	} else {
		char msg[BUFSIZ];
		symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
	}

	pthread_mutex_unlock(&notes->lock);
	return err;
}

static void __zero_source_counters(struct hist_entry *he)
{
	struct symbol *sym = he->ms.sym;

	symbol__annotate_zero_histograms(sym);
}

static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
	struct utsname uts;
	int err = uname(&uts);

	ui__warning("Out of bounds address found:\n\n"
		    "Addr: %" PRIx64 "\n"
		    "DSO: %s %c\n"
		    "Map: %" PRIx64 "-%" PRIx64 "\n"
		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
		    "Arch: %s\n"
		    "Kernel: %s\n"
		    "Tools: %s\n\n"
		    "Not all samples will be on the annotation output.\n\n"
		    "Please report to linux-kernel@vger.kernel.org\n",
		    ip, map->dso->long_name, dso__symtab_origin(map->dso),
		    map->start, map->end, sym->start, sym->end,
		    sym->binding == STB_GLOBAL ? 'g' :
		    sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
		    err ? "[unknown]" : uts.machine,
		    err ? "[unknown]" : uts.release, perf_version_string);

	if (use_browser <= 0)
		sleep(5);

	map->erange_warned = true;
}

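/*
 * Account one sample at 'ip' in the annotation histogram of the hist entry's
 * symbol. Uses a trylock so the sampling path never blocks on the annotation
 * lock held by the display thread.
 */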
static void perf_top__record_precise_ip(struct perf_top *top,
					struct hist_entry *he,
					struct perf_sample *sample,
					int counter, u64 ip)
{
	struct annotation *notes;
	struct symbol *sym = he->ms.sym;
	int err = 0;

	if (sym == NULL || (use_browser == 0 &&
			    (top->sym_filter_entry == NULL ||
			     top->sym_filter_entry->ms.sym != sym)))
		return;

	notes = symbol__annotation(sym);

	if (pthread_mutex_trylock(&notes->lock))
		return;

	err = hist_entry__inc_addr_samples(he, sample, counter, ip);

	pthread_mutex_unlock(&notes->lock);

	if (unlikely(err)) {
		/*
		 * This function is now called with he->hists->lock held.
		 * Release it before going to sleep.
		 */
		pthread_mutex_unlock(&he->hists->lock);

		if (err == -ERANGE && !he->ms.map->erange_warned)
			ui__warn_map_erange(he->ms.map, sym, ip);
		else if (err == -ENOMEM) {
			pr_err("Not enough memory for annotating '%s' symbol!\n",
			       sym->name);
			sleep(1);
		}

		pthread_mutex_lock(&he->hists->lock);
	}
}

static void perf_top__show_details(struct perf_top *top)
{
	struct hist_entry *he = top->sym_filter_entry;
	struct perf_evsel *evsel;
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!he)
		return;

	evsel = hists_to_evsel(he->hists);
	symbol = he->ms.sym;
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	symbol__calc_percent(symbol, evsel);

	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
	printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);

	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel,
				       0, top->sym_pcnt_filter, top->print_entries, 4);

	if (top->evlist->enabled) {
		if (top->zero)
			symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
		else
			symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
	}
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	pthread_mutex_unlock(&notes->lock);
}

static void perf_top__print_sym_table(struct perf_top *top)
{
	char bf[160];
	int printed = 0;
	const int win_width = top->winsize.ws_col - 1;
	struct perf_evsel *evsel = top->sym_evsel;
	struct hists *hists = evsel__hists(evsel);

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(top, bf, sizeof(bf));
	printf("%s\n", bf);

	perf_top__reset_sample_counters(top);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (hists->stats.nr_lost_warned !=
	    hists->stats.nr_events[PERF_RECORD_LOST]) {
		hists->stats.nr_lost_warned =
			hists->stats.nr_events[PERF_RECORD_LOST];
		color_fprintf(stdout, PERF_COLOR_RED,
			      "WARNING: LOST %d chunks, Check IO/CPU overload",
			      hists->stats.nr_lost_warned);
		++printed;
	}

	if (top->sym_filter_entry) {
		perf_top__show_details(top);
		return;
	}

	if (top->evlist->enabled) {
		if (top->zero) {
			hists__delete_entries(hists);
		} else {
			hists__decay_entries(hists, top->hide_user_symbols,
					     top->hide_kernel_symbols);
		}
	}

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);

	hists__output_recalc_col_len(hists, top->print_entries - printed);
	putchar('\n');
	hists__fprintf(hists, false, top->print_entries - printed, win_width,
		       top->min_percent, stdout, symbol_conf.use_callchain);
}

static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while (*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
	char *buf = malloc(0), *p;
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
	struct hists *hists = evsel__hists(top->sym_evsel);
	struct rb_node *next;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		top->sym_filter_entry = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	next = rb_first(&hists->entries);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
			found = n;
			break;
		}
		next = rb_next(&n->rb_node);
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
	} else
		perf_top__parse_source(top, found);

out_free:
	free(buf);
}

static void perf_top__print_mapped_keys(struct perf_top *top)
{
	char *name = NULL;

	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.\t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).\t(%d)\n", top->print_entries);

	if (top->evlist->nr_entries > 1)
		fprintf(stdout, "\t[E]     active event counter.\t(%s)\n", perf_evsel__name(top->sym_evsel));

	fprintf(stdout, "\t[f]     profile display filter (count).\t(%d)\n", top->count_filter);

	fprintf(stdout, "\t[F]     annotate display filter (percent).\t(%d%%)\n", top->sym_pcnt_filter);
	fprintf(stdout, "\t[s]     annotate symbol.\t(%s)\n", name ?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");

	fprintf(stdout,
		"\t[K]     hide kernel symbols.\t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.\t(%s)\n",
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.\t(%d)\n", top->zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}

static int perf_top__key_mapped(struct perf_top *top, int c)
{
	switch (c) {
	case 'd':
	case 'e':
	case 'f':
	case 'z':
	case 'q':
	case 'Q':
	case 'K':
	case 'U':
	case 'F':
	case 's':
	case 'S':
		return 1;
	case 'E':
		return top->evlist->nr_entries > 1 ? 1 : 0;
	default:
		break;
	}

	return 0;
}

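/*
 * Handle one interactive key from the stdio interface. Unmapped keys bring up
 * the key help and read a second key; returns false when the user asked to
 * quit.
 */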
static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
	bool ret = true;

	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios save;

		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		set_term_quiet_input(&save);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
			return ret;
	}

	switch (c) {
	case 'd':
		prompt_integer(&top->delay_secs, "Enter display delay");
		if (top->delay_secs < 1)
			top->delay_secs = 1;
		break;
	case 'e':
		prompt_integer(&top->print_entries, "Enter display entries (lines)");
		if (top->print_entries == 0) {
			perf_top__resize(top);
			signal(SIGWINCH, winch_sig);
		} else {
			signal(SIGWINCH, SIG_DFL);
		}
		break;
	case 'E':
		if (top->evlist->nr_entries > 1) {
			/* Select 0 as the default event: */
			int counter = 0;

			fprintf(stderr, "\nAvailable events:");

			evlist__for_each_entry(top->evlist, top->sym_evsel)
				fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));

			prompt_integer(&counter, "Enter details event counter");

			if (counter >= top->evlist->nr_entries) {
				top->sym_evsel = perf_evlist__first(top->evlist);
				fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
				sleep(1);
				break;
			}
			evlist__for_each_entry(top->evlist, top->sym_evsel)
				if (top->sym_evsel->idx == counter)
					break;
		} else
			top->sym_evsel = perf_evlist__first(top->evlist);
		break;
	case 'f':
		prompt_integer(&top->count_filter, "Enter display event count filter");
		break;
	case 'F':
		prompt_percent(&top->sym_pcnt_filter,
			       "Enter details display event filter (percent)");
		break;
	case 'K':
		top->hide_kernel_symbols = !top->hide_kernel_symbols;
		break;
	case 'q':
	case 'Q':
		printf("exiting.\n");
		if (top->dump_symtab)
			perf_session__fprintf_dsos(top->session, stderr);
		ret = false;
		break;
	case 's':
		perf_top__prompt_symbol(top, "Enter details symbol");
		break;
	case 'S':
		if (!top->sym_filter_entry)
			break;
		else {
			struct hist_entry *syme = top->sym_filter_entry;

			top->sym_filter_entry = NULL;
			__zero_source_counters(syme);
		}
		break;
	case 'U':
		top->hide_user_symbols = !top->hide_user_symbols;
		break;
	case 'z':
		top->zero = !top->zero;
		break;
	default:
		break;
	}

	return ret;
}

static void perf_top__sort_new_samples(void *arg)
{
	struct perf_top *t = arg;
	struct perf_evsel *evsel = t->sym_evsel;
	struct hists *hists;

	perf_top__reset_sample_counters(t);

	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;

	hists = evsel__hists(evsel);

	if (t->evlist->enabled) {
		if (t->zero) {
			hists__delete_entries(hists);
		} else {
			hists__decay_entries(hists, t->hide_user_symbols,
					     t->hide_kernel_symbols);
		}
	}

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);
}

static void *display_thread_tui(void *arg)
{
	struct perf_evsel *pos;
	struct perf_top *top = arg;
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
	struct hist_browser_timer hbt = {
		.timer		= perf_top__sort_new_samples,
		.arg		= top,
		.refresh	= top->delay_secs,
	};

	/*
	 * In order to read symbols from other namespaces perf needs to call
	 * setns(2). This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	perf_top__sort_new_samples(top);

	/*
	 * Initialize the uid_filter_str, in the future the TUI will allow
	 * zooming in/out UIDs. For now just use whatever the user passed
	 * via --uid.
	 */
	evlist__for_each_entry(top->evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		hists->uid_filter_str = top->record_opts.target.uid_str;
	}

	perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
				      top->min_percent,
				      &top->session->header.env);

	done = 1;
	return NULL;
}

static void display_sig(int sig __maybe_unused)
{
	done = 1;
}

static void display_setup_sig(void)
{
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGINT, display_sig);
	signal(SIGQUIT, display_sig);
	signal(SIGTERM, display_sig);
}

static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios save;
	struct perf_top *top = arg;
	int delay_msecs, c;

	/*
	 * In order to read symbols from other namespaces perf needs to call
	 * setns(2). This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	display_setup_sig();
	pthread__unblock_sigwinch();
repeat:
	delay_msecs = top->delay_secs * MSEC_PER_SEC;
	set_term_quiet_input(&save);
	/* trash return */
	getc(stdin);

	while (!done) {
		perf_top__print_sym_table(top);
		/*
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		 */
		switch (poll(&stdin_poll, 1, delay_msecs)) {
		case 0:
			continue;
		case -1:
			if (errno == EINTR)
				continue;
			__fallthrough;
		default:
			c = getc(stdin);
			tcsetattr(0, TCSAFLUSH, &save);

			if (perf_top__handle_keypress(top, c))
				goto repeat;
			done = 1;
		}
	}

	tcsetattr(0, TCSAFLUSH, &save);
	return NULL;
}

static int hist_iter__top_callback(struct hist_entry_iter *iter,
				   struct addr_location *al, bool single,
				   void *arg)
{
	struct perf_top *top = arg;
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;

	if (perf_hpp_list.sym && single)
		perf_top__record_precise_ip(top, he, iter->sample, evsel->idx, al->addr);

	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
			     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
	return 0;
}

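/*
 * Resolve one PERF_RECORD_SAMPLE to a map/symbol and add it to the hists of
 * the sampling evsel, warning once about kptr_restrict or a missing vmlinux
 * when kernel samples can't be resolved.
 */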
static void perf_event__process_sample(struct perf_tool *tool,
				       const union perf_event *event,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_top *top = container_of(tool, struct perf_top, tool);
	struct addr_location al;
	int err;

	if (!machine && perf_guest) {
		static struct intlist *seen;

		if (!seen)
			seen = intlist__new(NULL);

		if (!intlist__has_entry(seen, sample->pid)) {
			pr_err("Can't find guest [%d]'s kernel information\n",
			       sample->pid);
			intlist__add(seen, sample->pid);
		}
		return;
	}

	if (!machine) {
		pr_err("%u unprocessable samples recorded.\r",
		       top->session->evlist->stats.nr_unprocessable_samples++);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top->exact_samples++;

	if (machine__resolve(machine, &al, sample) < 0)
		return;

	if (!machine->kptr_restrict_warned &&
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
		if (!perf_evlist__exclude_kernel(top->session->evlist)) {
			ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
				    al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
				    " modules" : "");
			if (use_browser <= 0)
				sleep(5);
		}
		machine->kptr_restrict_warned = true;
	}

	if (al.sym == NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
		    al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			if (symbol_conf.vmlinux_name) {
				char serr[256];
				dso__strerror_load(al.map->dso, serr, sizeof(serr));
				ui__warning("The %s file can't be used: %s\n%s",
					    symbol_conf.vmlinux_name, serr, msg);
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
			top->vmlinux_warned = true;
		}
	}

	if (al.sym == NULL || !al.sym->idle) {
		struct hists *hists = evsel__hists(evsel);
		struct hist_entry_iter iter = {
			.evsel		= evsel,
			.sample		= sample,
			.add_entry_cb	= hist_iter__top_callback,
		};

		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		pthread_mutex_lock(&hists->lock);

		err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
		if (err < 0)
			pr_err("Problem incrementing symbol period, skipping event\n");

		pthread_mutex_unlock(&hists->lock);
	}

	addr_location__put(&al);
}

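/*
 * Drain one mmap ring: parse each event, pick the machine to attribute it to
 * from the sample cpumode, then feed samples to perf_event__process_sample()
 * and other event types to machine__process_event().
 */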
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
	struct perf_sample sample;
	struct perf_evsel *evsel;
	struct perf_session *session = top->session;
	union perf_event *event;
	struct machine *machine;
	int ret;

	while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
		ret = perf_evlist__parse_sample(top->evlist, event, &sample);
		if (ret) {
			pr_err("Can't parse sample, err = %d\n", ret);
			goto next_event;
		}

		evsel = perf_evlist__id2evsel(session->evlist, sample.id);
		assert(evsel != NULL);

		if (event->header.type == PERF_RECORD_SAMPLE)
			++top->samples;

		switch (sample.cpumode) {
		case PERF_RECORD_MISC_USER:
			++top->us_samples;
			if (top->hide_user_symbols)
				goto next_event;
			machine = &session->machines.host;
			break;
		case PERF_RECORD_MISC_KERNEL:
			++top->kernel_samples;
			if (top->hide_kernel_symbols)
				goto next_event;
			machine = &session->machines.host;
			break;
		case PERF_RECORD_MISC_GUEST_KERNEL:
			++top->guest_kernel_samples;
			machine = perf_session__find_machine(session,
							     sample.pid);
			break;
		case PERF_RECORD_MISC_GUEST_USER:
			++top->guest_us_samples;
			/*
			 * TODO: we don't process guest user from host side
			 * except simple counting.
			 */
			goto next_event;
		default:
			if (event->header.type == PERF_RECORD_SAMPLE)
				goto next_event;
			machine = &session->machines.host;
			break;
		}

		if (event->header.type == PERF_RECORD_SAMPLE) {
			perf_event__process_sample(&top->tool, event, evsel,
						   &sample, machine);
		} else if (event->header.type < PERF_RECORD_MAX) {
			hists__inc_nr_events(evsel__hists(evsel), event->header.type);
			machine__process_event(machine, event, &sample);
		} else
			++session->evlist->stats.nr_unknown_events;
next_event:
		perf_evlist__mmap_consume(top->evlist, idx);
	}
}

static void perf_top__mmap_read(struct perf_top *top)
{
	int i;

	for (i = 0; i < top->evlist->nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);
}

static int perf_top__start_counters(struct perf_top *top)
{
	char msg[BUFSIZ];
	struct perf_evsel *counter;
	struct perf_evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, counter) {
try_again:
		if (perf_evsel__open(counter, top->evlist->cpus,
				     top->evlist->threads) < 0) {
			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			perf_evsel__open_strerror(counter, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out_err;
		}
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
		ui__error("Failed to mmap with %d (%s)\n",
			  errno, str_error_r(errno, msg, sizeof(msg)));
		goto out_err;
	}

	return 0;

out_err:
	return -1;
}

static int callchain_param__setup_sample_type(struct callchain_param *callchain)
{
	if (!perf_hpp_list.sym) {
		if (callchain->enabled) {
			ui__error("Selected -g but \"sym\" not present in --sort/-s.");
			return -EINVAL;
		}
	} else if (callchain->mode != CHAIN_NONE) {
		if (callchain_register_param(callchain) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	return 0;
}

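/*
 * Main body of 'perf top': create the session, synthesize existing threads,
 * start the counters, spawn the display thread (TUI or stdio) and keep
 * draining the mmap rings until the display thread sets 'done'.
 */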
static int __cmd_top(struct perf_top *top)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evsel_config_term *err_term;
	struct perf_evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;
	pthread_t thread;
	int ret;

	top->session = perf_session__new(NULL, false, NULL);
	if (top->session == NULL)
		return -1;

	if (!objdump_path) {
		ret = perf_env__lookup_objdump(&top->session->header.env);
		if (ret)
			goto out_delete;
	}

	ret = callchain_param__setup_sample_type(&callchain_param);
	if (ret)
		goto out_delete;

	if (perf_session__register_idle_thread(top->session) < 0)
		goto out_delete;

	if (top->nr_threads_synthesize > 1)
		perf_set_multithreaded();

	machine__synthesize_threads(&top->session->machines.host, &opts->target,
				    top->evlist->threads, false,
				    opts->proc_map_timeout,
				    top->nr_threads_synthesize);

	if (top->nr_threads_synthesize > 1)
		perf_set_singlethreaded();

	if (perf_hpp_list.socket) {
		ret = perf_env__read_cpu_topology_map(&perf_env);
		if (ret < 0)
			goto out_err_cpu_topo;
	}

	ret = perf_top__start_counters(top);
	if (ret)
		goto out_delete;

	ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
	if (ret) {
		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
		       err_term->val.drv_cfg, perf_evsel__name(pos), errno,
		       str_error_r(errno, msg, sizeof(msg)));
		goto out_delete;
	}

	top->session->evlist = top->evlist;
	perf_session__set_id_hdr_size(top->session);

	/*
	 * When perf is starting the traced process, all the events (apart from
	 * group members) have enable_on_exec=1 set, so don't spoil it by
	 * prematurely enabling them.
	 *
	 * XXX 'top' still doesn't start workloads like record, trace, but should,
	 * so leave the check here.
	 */
	if (!target__none(&opts->target))
		perf_evlist__enable(top->evlist);

	/* Wait for a minimal set of events before starting the snapshot */
	perf_evlist__poll(top->evlist, 100);

	perf_top__mmap_read(top);

	ret = -1;
	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							     display_thread), top)) {
		ui__error("Could not create display thread.\n");
		goto out_delete;
	}

	if (top->realtime_prio) {
		struct sched_param param;

		param.sched_priority = top->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			ui__error("Could not set realtime priority.\n");
			goto out_join;
		}
	}

	while (!done) {
		u64 hits = top->samples;

		perf_top__mmap_read(top);

		if (hits == top->samples)
			ret = perf_evlist__poll(top->evlist, 100);

		if (resize) {
			perf_top__resize(top);
			resize = 0;
		}
	}

	ret = 0;
out_join:
	pthread_join(thread, NULL);
out_delete:
	perf_session__delete(top->session);
	top->session = NULL;

	return ret;

out_err_cpu_topo: {
	char errbuf[BUFSIZ];
	const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));

	ui__error("Could not read the CPU topology map: %s\n", err);
	goto out_delete;
}
}

static int
callchain_opt(const struct option *opt, const char *arg, int unset)
{
	symbol_conf.use_callchain = true;
	return record_callchain_opt(opt, arg, unset);
}

static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = !unset;
	callchain->record_mode = CALLCHAIN_FP;

	/*
	 * --no-call-graph
	 */
	if (unset) {
		symbol_conf.use_callchain = false;
		callchain->record_mode = CALLCHAIN_NONE;
		return 0;
	}

	return parse_callchain_top_opt(arg);
}

static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
{
	if (!strcmp(var, "top.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */
	if (!strcmp(var, "top.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
		return 0;
	}

	return 0;
}

static int
parse_percent_limit(const struct option *opt, const char *arg,
		    int unset __maybe_unused)
{
	struct perf_top *top = opt->value;

	top->min_percent = strtof(arg, NULL);
	return 0;
}

const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
	"\n\t\t\t\tDefault: fp,graph,0.5,caller,function";

int cmd_top(int argc, const char **argv)
{
	char errbuf[BUFSIZ];
	struct perf_top top = {
		.count_filter	     = 5,
		.delay_secs	     = 2,
		.record_opts = {
			.mmap_pages	= UINT_MAX,
			.user_freq	= UINT_MAX,
			.user_interval	= ULLONG_MAX,
			.freq		= 4000, /* 4 KHz */
			.target		= {
				.uses_mmap   = true,
			},
			.proc_map_timeout    = 500,
		},
		.max_stack	     = sysctl_perf_event_max_stack,
		.sym_pcnt_filter     = 5,
		.nr_threads_synthesize = UINT_MAX,
	};
	struct record_opts *opts = &top.record_opts;
	struct target *target = &opts->target;
	const struct option options[] = {
	OPT_CALLBACK('e', "event", &top.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
	OPT_STRING('p', "pid", &target->pid, "pid",
		   "profile events on existing process id"),
	OPT_STRING('t', "tid", &target->tid, "tid",
		   "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_INTEGER('r', "realtime", &top.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
		    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN(0, "group", &opts->group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
		   "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
	OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"),
	OPT_INTEGER('E', "entries", &top.print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
		   " Please refer the man page for the complete list."),
	OPT_STRING(0, "fields", &field_order, "key[,keys...]",
		   "output field(s): overhead, period, sample plus all of sort keys"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording and display",
			   &callchain_opt),
	OPT_CALLBACK(0, "call-graph", &callchain_param,
		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
		     top_callchain_help, &parse_callchain_opt),
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		    "Accumulate callchains of children and show total overhead as well"),
	OPT_INTEGER(0, "max-stack", &top.max_stack,
		    "Set the maximum stack depth when parsing the callchain. "
		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		     "ignore callees of these functions in call graphs",
		     report_parse_ignore_callees_opt),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
	OPT_STRING(0, "objdump", &objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
	OPT_CALLBACK(0, "percent-limit", &top, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		     "How to display percentage of filtered entries", parse_filter_percentage),
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
	OPT_UINTEGER(0, "proc-map-timeout", &opts->proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),
	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		    "Show raw trace event output (do not use print fmt or plugins)"),
	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
		    "Show entries in a hierarchy"),
	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
		     "number of thread to run event synthesize"),
	OPT_END()
	};
	const char * const top_usage[] = {
		"perf top [<options>]",
		NULL
	};
	int status = hists__init();

	if (status < 0)
		return status;

	top.evlist = perf_evlist__new();
	if (top.evlist == NULL)
		return -ENOMEM;

	status = perf_config(perf_top_config, &top);
	if (status)
		return status;

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	if (!top.evlist->nr_entries &&
	    perf_evlist__add_default(top.evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_delete_evlist;
	}

	if (symbol_conf.report_hierarchy) {
		/* disable incompatible options */
		symbol_conf.event_group = false;
		symbol_conf.cumulate_callchain = false;

		if (field_order) {
			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
			parse_options_usage(top_usage, options, "fields", 0);
			parse_options_usage(NULL, options, "hierarchy", 0);
			goto out_delete_evlist;
		}
	}

	sort__mode = SORT_MODE__TOP;
	/* display thread wants entries to be collapsed in a different tree */
	perf_hpp_list.need_collapse = 1;

	if (top.use_stdio)
		use_browser = 0;
	else if (top.use_tui)
		use_browser = 1;

	setup_browser(false);

	if (setup_sorting(top.evlist) < 0) {
		if (sort_order)
			parse_options_usage(top_usage, options, "s", 1);
		if (field_order)
			parse_options_usage(sort_order ? NULL : top_usage,
					    options, "fields", 0);
		goto out_delete_evlist;
	}

	status = target__validate(target);
	if (status) {
		target__strerror(target, status, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	status = target__parse_uid(target);
	if (status) {
		int saved_errno = errno;

		target__strerror(target, status, errbuf, BUFSIZ);
		ui__error("%s\n", errbuf);

		status = -saved_errno;
		goto out_delete_evlist;
	}

	if (target__none(target))
		target->system_wide = true;

	if (perf_evlist__create_maps(top.evlist, target) < 0) {
		ui__error("Couldn't create thread/CPU maps: %s\n",
			  errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out_delete_evlist;
	}

	symbol_conf.nr_events = top.evlist->nr_entries;

	if (top.delay_secs < 1)
		top.delay_secs = 1;

	if (record_opts__config(opts)) {
		status = -EINVAL;
		goto out_delete_evlist;
	}

	top.sym_evsel = perf_evlist__first(top.evlist);

	if (!callchain_param.enabled) {
		symbol_conf.cumulate_callchain = false;
		perf_hpp__cancel_cumulate();
	}

	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;

	status = symbol__annotation_init();
	if (status < 0)
		goto out_delete_evlist;

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init(NULL) < 0)
		return -1;

	sort__setup_elide(stdout);

	get_term_dimensions(&top.winsize);
	if (top.print_entries == 0) {
		perf_top__update_print_entries(&top);
		signal(SIGWINCH, winch_sig);
	}

	status = __cmd_top(&top);

out_delete_evlist:
	perf_evlist__delete(top.evlist);

	return status;
}