/* (removed web-viewer extraction residue: size header and line-number gutter) */
  1. /*
  2. * builtin-report.c
  3. *
  4. * Builtin report command: Analyze the perf.data input file,
  5. * look up and read DSOs and symbol information and display
  6. * a histogram of results, along various sorting keys.
  7. */
  8. #include "builtin.h"
  9. #include "util/util.h"
  10. #include "util/color.h"
  11. #include "util/list.h"
  12. #include "util/cache.h"
  13. #include "util/rbtree.h"
  14. #include "util/symbol.h"
  15. #include "util/string.h"
  16. #include "perf.h"
  17. #include "util/parse-options.h"
  18. #include "util/parse-events.h"
  19. #define SHOW_KERNEL 1
  20. #define SHOW_USER 2
  21. #define SHOW_HV 4
/* Input file to analyze; overridden by -i/--input. */
static char const *input_name = "perf.data";
/* Optional vmlinux path for kernel symbol resolution (-k/--vmlinux). */
static char *vmlinux = NULL;

/* Default --sort key list; sort_order points here unless -s is given. */
static char default_sort_order[] = "comm,dso";
static char *sort_order = default_sort_order;

/* File descriptor of the opened perf.data. */
static int input;
/* Which privilege levels to include in the histogram. */
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

/* -D/--dump-raw-trace: dprintf() below prints only when this is set. */
static int dump_trace = 0;
#define dprintf(x...) do { if (dump_trace) printf(x); } while (0)

static int verbose;
/* -P/--full-paths: do not shorten DSO paths relative to the cwd. */
static int full_paths;

static unsigned long page_size;
/* Number of pages mapped at a time while streaming the input file. */
static unsigned long mmap_window = 32;
/*
 * On-disk record layouts read from the perf.data event stream.
 * Every record starts with the common perf_event_header.
 */
struct ip_event {
	struct perf_event_header header;
	__u64 ip;		/* sampled instruction pointer */
	__u32 pid, tid;
};
struct mmap_event {
	struct perf_event_header header;
	__u32 pid, tid;
	__u64 start;		/* mapping start address */
	__u64 len;		/* mapping length in bytes */
	__u64 pgoff;		/* file offset of the mapping */
	char filename[PATH_MAX];
};
struct comm_event {
	struct perf_event_header header;
	__u32 pid, tid;
	char comm[16];		/* task command name */
};
struct fork_event {
	struct perf_event_header header;
	__u32 pid, ppid;	/* child and parent pids */
};
struct period_event {
	struct perf_event_header header;
	__u64 time;
	__u64 id;
	__u64 sample_period;
};

/* Convenience union: view a raw record as any of the above. */
typedef union event_union {
	struct perf_event_header header;
	struct ip_event ip;
	struct mmap_event mmap;
	struct comm_event comm;
	struct fork_event fork;
	struct period_event period;
} event_t;
/* Global list of all DSOs seen so far, plus two special entries. */
static LIST_HEAD(dsos);
static struct dso *kernel_dso;	/* the "[kernel]" image */
static struct dso *vdso;	/* the "[vdso]" pseudo-DSO */
  73. static void dsos__add(struct dso *dso)
  74. {
  75. list_add_tail(&dso->node, &dsos);
  76. }
  77. static struct dso *dsos__find(const char *name)
  78. {
  79. struct dso *pos;
  80. list_for_each_entry(pos, &dsos, node)
  81. if (strcmp(pos->name, name) == 0)
  82. return pos;
  83. return NULL;
  84. }
  85. static struct dso *dsos__findnew(const char *name)
  86. {
  87. struct dso *dso = dsos__find(name);
  88. int nr;
  89. if (dso)
  90. return dso;
  91. dso = dso__new(name, 0);
  92. if (!dso)
  93. goto out_delete_dso;
  94. nr = dso__load(dso, NULL, verbose);
  95. if (nr < 0) {
  96. if (verbose)
  97. fprintf(stderr, "Failed to open: %s\n", name);
  98. goto out_delete_dso;
  99. }
  100. if (!nr && verbose) {
  101. fprintf(stderr,
  102. "No symbols found in: %s, maybe install a debug package?\n",
  103. name);
  104. }
  105. dsos__add(dso);
  106. return dso;
  107. out_delete_dso:
  108. dso__delete(dso);
  109. return NULL;
  110. }
  111. static void dsos__fprintf(FILE *fp)
  112. {
  113. struct dso *pos;
  114. list_for_each_entry(pos, &dsos, node)
  115. dso__fprintf(pos, fp);
  116. }
/*
 * Symbol-lookup hook for the "[vdso]" pseudo-DSO: addresses are
 * resolved against the kernel image instead; the dso argument is
 * deliberately unused.
 */
static struct symbol *vdso__find_symbol(struct dso *dso, uint64_t ip)
{
	return dso__find_symbol(kernel_dso, ip);
}
/*
 * Load kernel symbols (from vmlinux if given) and set up the "[vdso]"
 * pseudo-DSO.  Returns dso__load_kernel()'s status.  Note that the
 * vdso entry is created even when the kernel load failed.
 */
static int load_kernel(void)
{
	int err;

	kernel_dso = dso__new("[kernel]", 0);
	if (!kernel_dso)
		return -1;

	err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
	if (err) {
		dso__delete(kernel_dso);
		kernel_dso = NULL;
	} else
		dsos__add(kernel_dso);

	vdso = dso__new("[vdso]", 0);
	if (!vdso)
		return -1;

	/* vdso addresses resolve via the kernel image, see vdso__find_symbol(). */
	vdso->find_symbol = vdso__find_symbol;

	dsos__add(vdso);

	return err;
}
  140. static char __cwd[PATH_MAX];
  141. static char *cwd = __cwd;
  142. static int cwdlen;
  143. static int strcommon(const char *pathname)
  144. {
  145. int n = 0;
  146. while (pathname[n] == cwd[n] && n < cwdlen)
  147. ++n;
  148. return n;
  149. }
/* One mmap()ed region of a thread's address space. */
struct map {
	struct list_head node;	/* linkage on thread->maps */
	uint64_t start;		/* first address of the mapping */
	uint64_t end;		/* last address (treated as inclusive by thread__find_map) */
	uint64_t pgoff;		/* file offset of the mapping */
	uint64_t (*map_ip)(struct map *, uint64_t);	/* ip -> dso-relative address */
	struct dso *dso;	/* backing object */
};
  158. static uint64_t map__map_ip(struct map *map, uint64_t ip)
  159. {
  160. return ip - map->start + map->pgoff;
  161. }
/* Identity translation: vdso/anon addresses are used verbatim. */
static uint64_t vdso__map_ip(struct map *map, uint64_t ip)
{
	return ip;
}
  166. static inline int is_anon_memory(const char *filename)
  167. {
  168. return strcmp(filename, "//anon") == 0;
  169. }
/*
 * Build a struct map from a PERF_EVENT_MMAP record.
 * Returns NULL on allocation or DSO-lookup failure.
 */
static struct map *map__new(struct mmap_event *event)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		const char *filename = event->filename;
		char newfilename[PATH_MAX];
		int anon;

		/* Shorten the path relative to the cwd ("./...") unless --full-paths. */
		if (cwd) {
			int n = strcommon(filename);

			if (n == cwdlen) {
				snprintf(newfilename, sizeof(newfilename),
					".%s", filename + n);
				filename = newfilename;
			}
		}

		/*
		 * Anonymous maps: redirect symbol lookup to
		 * /tmp/perf-<pid>.map — presumably a per-process symbol
		 * map written by a JIT; TODO confirm the producer.
		 */
		anon = is_anon_memory(filename);

		if (anon) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
			filename = newfilename;
		}

		self->start = event->start;
		self->end = event->start + event->len;
		self->pgoff = event->pgoff;

		self->dso = dsos__findnew(filename);
		if (self->dso == NULL)
			goto out_delete;

		/* vdso and anon mappings use identity ip translation. */
		if (self->dso == vdso || anon)
			self->map_ip = vdso__map_ip;
		else
			self->map_ip = map__map_ip;
	}
	return self;
out_delete:
	free(self);
	return NULL;
}
  206. static struct map *map__clone(struct map *self)
  207. {
  208. struct map *map = malloc(sizeof(*self));
  209. if (!map)
  210. return NULL;
  211. memcpy(map, self, sizeof(*self));
  212. return map;
  213. }
  214. static int map__overlap(struct map *l, struct map *r)
  215. {
  216. if (l->start > r->start) {
  217. struct map *t = l;
  218. l = r;
  219. r = t;
  220. }
  221. if (l->end > r->start)
  222. return 1;
  223. return 0;
  224. }
/* Print one mapping as " start-end pgoff dso-name". */
static size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %"PRIx64"-%"PRIx64" %"PRIx64" %s\n",
		self->start, self->end, self->pgoff, self->dso->name);
}
/* Per-pid state: rbtree linkage, address-space maps and command name. */
struct thread {
	struct rb_node rb_node;		/* linkage in the global 'threads' tree */
	struct list_head maps;		/* struct map list, kept by thread__insert_map() */
	pid_t pid;
	char *comm;			/* heap-allocated; may be NULL after OOM */
};
  236. static struct thread *thread__new(pid_t pid)
  237. {
  238. struct thread *self = malloc(sizeof(*self));
  239. if (self != NULL) {
  240. self->pid = pid;
  241. self->comm = malloc(32);
  242. if (self->comm)
  243. snprintf(self->comm, 32, ":%d", self->pid);
  244. INIT_LIST_HEAD(&self->maps);
  245. }
  246. return self;
  247. }
  248. static int thread__set_comm(struct thread *self, const char *comm)
  249. {
  250. if (self->comm)
  251. free(self->comm);
  252. self->comm = strdup(comm);
  253. return self->comm ? 0 : -ENOMEM;
  254. }
  255. static size_t thread__fprintf(struct thread *self, FILE *fp)
  256. {
  257. struct map *pos;
  258. size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
  259. list_for_each_entry(pos, &self->maps, node)
  260. ret += map__fprintf(pos, fp);
  261. return ret;
  262. }
/* All threads seen so far, keyed by pid. */
static struct rb_root threads;
/* Most recently returned thread — a one-entry lookup cache. */
static struct thread *last_match;

/*
 * Look up the thread for @pid, creating and inserting it on demand.
 * Returns NULL only when thread__new() fails.
 */
static struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we dont have to look up
	 * the full rbtree:
	 */
	if (last_match && last_match->pid == pid)
		return last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
		last_match = th;
	}

	return th;
}
/*
 * Add @map to @self's address space, removing any existing mapping
 * that overlaps the new range first.
 */
static void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	/* _safe variant: entries may be deleted while walking. */
	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			list_del_init(&pos->node);
			/* XXX leaks dsos */
			free(pos);
		}
	}

	list_add_tail(&map->node, &self->maps);
}
  309. static int thread__fork(struct thread *self, struct thread *parent)
  310. {
  311. struct map *map;
  312. if (self->comm)
  313. free(self->comm);
  314. self->comm = strdup(parent->comm);
  315. if (!self->comm)
  316. return -ENOMEM;
  317. list_for_each_entry(map, &parent->maps, node) {
  318. struct map *new = map__clone(map);
  319. if (!new)
  320. return -ENOMEM;
  321. thread__insert_map(self, new);
  322. }
  323. return 0;
  324. }
  325. static struct map *thread__find_map(struct thread *self, uint64_t ip)
  326. {
  327. struct map *pos;
  328. if (self == NULL)
  329. return NULL;
  330. list_for_each_entry(pos, &self->maps, node)
  331. if (ip >= pos->start && ip <= pos->end)
  332. return pos;
  333. return NULL;
  334. }
  335. static size_t threads__fprintf(FILE *fp)
  336. {
  337. size_t ret = 0;
  338. struct rb_node *nd;
  339. for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
  340. struct thread *pos = rb_entry(nd, struct thread, rb_node);
  341. ret += thread__fprintf(pos, fp);
  342. }
  343. return ret;
  344. }
/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

/* One histogram bucket; equality is defined by the active sort keys. */
struct hist_entry {
	struct rb_node rb_node;

	struct thread *thread;
	struct map *map;
	struct dso *dso;
	struct symbol *sym;
	uint64_t ip;
	char level;		/* 'k' kernel, '.' user, 'H' hypervisor */

	uint32_t count;		/* number of samples in this bucket */
};
/*
 * configurable sorting bits
 */

/* One --sort dimension: comparators plus column printing. */
struct sort_entry {
	struct list_head list;	/* linkage on hist_entry__sort_list */

	char *header;		/* column heading text */

	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);	/* primary order */
	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);	/* optional merge key */
	size_t (*print)(FILE *fp, struct hist_entry *);
};
/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Descending pid order (right - left). */
	return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self)
{
	/* comm may be NULL after OOM; print the empty string then. */
	return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
	.header = " Command: Pid",
	.cmp = sort__thread_cmp,
	.print = sort__thread_print,
};
/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/*
	 * Primary ordering is by pid; identical comms from different
	 * threads are merged later by the collapse pass below.
	 */
	return right->thread->pid - left->thread->pid;
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	char *comm_l = left->thread->comm;
	char *comm_r = right->thread->comm;

	/* NULL comms (OOM) sort before non-NULL ones. */
	if (!comm_l || !comm_r) {
		if (!comm_l && !comm_r)
			return 0;
		else if (!comm_l)
			return -1;
		else
			return 1;
	}

	return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, "%16s", self->thread->comm);
}

static struct sort_entry sort_comm = {
	.header = " Command",
	.cmp = sort__comm_cmp,
	.collapse = sort__comm_collapse,	/* triggers the collapse pass */
	.print = sort__comm_print,
};
/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct dso *dso_l = left->dso;
	struct dso *dso_r = right->dso;

	/* Entries without a resolved DSO sort before resolved ones. */
	if (!dso_l || !dso_r) {
		if (!dso_l && !dso_r)
			return 0;
		else if (!dso_l)
			return -1;
		else
			return 1;
	}

	return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self)
{
	if (self->dso)
		return fprintf(fp, "%-25s", self->dso->name);

	/* No DSO resolved: fall back to the raw instruction pointer. */
	return fprintf(fp, "%016llx ", (__u64)self->ip);
}

static struct sort_entry sort_dso = {
	.header = "Shared Object ",
	.cmp = sort__dso_cmp,
	.print = sort__dso_print,
};
/* --sort symbol */

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t ip_l, ip_r;

	if (left->sym == right->sym)
		return 0;

	/* Unresolved entries order by raw ip instead of symbol start. */
	ip_l = left->sym ? left->sym->start : left->ip;
	ip_r = right->sym ? right->sym->start : right->ip;

	return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self)
{
	size_t ret = 0;

	if (verbose)
		ret += fprintf(fp, "%#018llx ", (__u64)self->ip);

	if (self->sym) {
		/* '[k]' marks kernel symbols, '[.]' userspace ones. */
		ret += fprintf(fp, "[%c] %s",
			self->dso == kernel_dso ? 'k' : '.', self->sym->name);
	} else {
		ret += fprintf(fp, "%#016llx", (__u64)self->ip);
	}

	return ret;
}

static struct sort_entry sort_sym = {
	.header = "Symbol",
	.cmp = sort__sym_cmp,
	.print = sort__sym_print,
};
/* Set when any chosen sort key defines a collapse function. */
static int sort__need_collapse = 0;

/* Maps a --sort token onto its sort_entry; 'taken' prevents double-add. */
struct sort_dimension {
	char *name;
	struct sort_entry *entry;
	int taken;
};

static struct sort_dimension sort_dimensions[] = {
	{ .name = "pid", .entry = &sort_thread, },
	{ .name = "comm", .entry = &sort_comm, },
	{ .name = "dso", .entry = &sort_dso, },
	{ .name = "symbol", .entry = &sort_sym, },
};
  487. static LIST_HEAD(hist_entry__sort_list);
  488. static int sort_dimension__add(char *tok)
  489. {
  490. int i;
  491. for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
  492. struct sort_dimension *sd = &sort_dimensions[i];
  493. if (sd->taken)
  494. continue;
  495. if (strncasecmp(tok, sd->name, strlen(tok)))
  496. continue;
  497. if (sd->entry->collapse)
  498. sort__need_collapse = 1;
  499. list_add_tail(&sd->entry->list, &hist_entry__sort_list);
  500. sd->taken = 1;
  501. return 0;
  502. }
  503. return -ESRCH;
  504. }
  505. static int64_t
  506. hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
  507. {
  508. struct sort_entry *se;
  509. int64_t cmp = 0;
  510. list_for_each_entry(se, &hist_entry__sort_list, list) {
  511. cmp = se->cmp(left, right);
  512. if (cmp)
  513. break;
  514. }
  515. return cmp;
  516. }
  517. static int64_t
  518. hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
  519. {
  520. struct sort_entry *se;
  521. int64_t cmp = 0;
  522. list_for_each_entry(se, &hist_entry__sort_list, list) {
  523. int64_t (*f)(struct hist_entry *, struct hist_entry *);
  524. f = se->collapse ?: se->cmp;
  525. cmp = f(left, right);
  526. if (cmp)
  527. break;
  528. }
  529. return cmp;
  530. }
/*
 * Print one histogram line: the colored overhead percentage (or the
 * raw count when total_samples is 0), then each active sort column.
 */
static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples)
{
	struct sort_entry *se;
	size_t ret;

	if (total_samples) {
		double percent = self->count * 100.0 / total_samples;
		char *color = PERF_COLOR_NORMAL;

		/*
		 * We color high-overhead entries in red, low-overhead
		 * entries in green - and keep the middle ground normal:
		 */
		if (percent >= 5.0)
			color = PERF_COLOR_RED;
		if (percent < 0.5)
			color = PERF_COLOR_GREEN;

		ret = color_fprintf(fp, color, " %6.2f%%",
				(self->count * 100.0) / total_samples);
	} else
		ret = fprintf(fp, "%12d ", self->count);

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		fprintf(fp, " ");	/* separator; not counted in ret */
		ret += se->print(fp, self);
	}

	ret += fprintf(fp, "\n");

	return ret;
}
/*
 * collect histogram counts
 */

/*
 * Record one sample: bump the matching bucket's count, or allocate a
 * new bucket and insert it into the 'hist' rbtree.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
		struct symbol *sym, uint64_t ip, char level)
{
	struct rb_node **p = &hist.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	/* Candidate bucket, built on the stack for the tree search. */
	struct hist_entry entry = {
		.thread = thread,
		.map = map,
		.dso = dso,
		.sym = sym,
		.ip = ip,
		.level = level,
		.count = 1,
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		/* Existing bucket matches under the active sort keys. */
		if (!cmp) {
			he->count++;
			return 0;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = malloc(sizeof(*he));
	if (!he)
		return -ENOMEM;
	*he = entry;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &hist);

	return 0;
}
/* Buckets are plain malloc()ed blocks; nothing else to tear down. */
static void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
/*
 * collapse the histogram
 */

static struct rb_root collapse_hists;

/*
 * Insert @he into collapse_hists, keyed by the collapse comparators.
 * When an equal entry already exists, its count absorbs @he's and @he
 * is freed.
 */
static void collapse__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &collapse_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			/* Merge duplicate buckets. */
			iter->count += he->count;
			hist_entry__free(he);
			return;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &collapse_hists);
}
  630. static void collapse__resort(void)
  631. {
  632. struct rb_node *next;
  633. struct hist_entry *n;
  634. if (!sort__need_collapse)
  635. return;
  636. next = rb_first(&hist);
  637. while (next) {
  638. n = rb_entry(next, struct hist_entry, rb_node);
  639. next = rb_next(&n->rb_node);
  640. rb_erase(&n->rb_node, &hist);
  641. collapse__insert_entry(n);
  642. }
  643. }
/*
 * reverse the map, sort on count.
 */

static struct rb_root output_hists;

/* Insert @he into output_hists ordered by descending sample count. */
static void output__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &output_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		/* Larger counts go left, so rb_first() yields the hottest entry. */
		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &output_hists);
}
  664. static void output__resort(void)
  665. {
  666. struct rb_node *next;
  667. struct hist_entry *n;
  668. struct rb_root *tree = &hist;
  669. if (sort__need_collapse)
  670. tree = &collapse_hists;
  671. next = rb_first(tree);
  672. while (next) {
  673. n = rb_entry(next, struct hist_entry, rb_node);
  674. next = rb_next(&n->rb_node);
  675. rb_erase(&n->rb_node, tree);
  676. output__insert_entry(n);
  677. }
  678. }
  679. static size_t output__fprintf(FILE *fp, uint64_t total_samples)
  680. {
  681. struct hist_entry *pos;
  682. struct sort_entry *se;
  683. struct rb_node *nd;
  684. size_t ret = 0;
  685. fprintf(fp, "\n");
  686. fprintf(fp, "#\n");
  687. fprintf(fp, "# (%Ld samples)\n", (__u64)total_samples);
  688. fprintf(fp, "#\n");
  689. fprintf(fp, "# Overhead");
  690. list_for_each_entry(se, &hist_entry__sort_list, list)
  691. fprintf(fp, " %s", se->header);
  692. fprintf(fp, "\n");
  693. fprintf(fp, "# ........");
  694. list_for_each_entry(se, &hist_entry__sort_list, list) {
  695. int i;
  696. fprintf(fp, " ");
  697. for (i = 0; i < strlen(se->header); i++)
  698. fprintf(fp, ".");
  699. }
  700. fprintf(fp, "\n");
  701. fprintf(fp, "#\n");
  702. for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
  703. pos = rb_entry(nd, struct hist_entry, rb_node);
  704. ret += hist_entry__fprintf(fp, pos, total_samples);
  705. }
  706. if (!strcmp(sort_order, default_sort_order)) {
  707. fprintf(fp, "#\n");
  708. fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n");
  709. fprintf(fp, "#\n");
  710. }
  711. fprintf(fp, "\n");
  712. return ret;
  713. }
  714. static void register_idle_thread(void)
  715. {
  716. struct thread *thread = threads__findnew(0);
  717. if (thread == NULL ||
  718. thread__set_comm(thread, "[idle]")) {
  719. fprintf(stderr, "problem inserting idle task.\n");
  720. exit(-1);
  721. }
  722. }
/* Per-event-type counters, reported at the end under -D. */
static unsigned long total = 0,		/* IP (sample) events */
		total_mmap = 0,		/* PERF_EVENT_MMAP */
		total_comm = 0,		/* PERF_EVENT_COMM */
		total_fork = 0,		/* PERF_EVENT_FORK */
		total_unknown = 0;	/* unparseable records */
  728. static int
  729. process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
  730. {
  731. char level;
  732. int show = 0;
  733. struct dso *dso = NULL;
  734. struct thread *thread = threads__findnew(event->ip.pid);
  735. uint64_t ip = event->ip.ip;
  736. struct map *map = NULL;
  737. dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
  738. (void *)(offset + head),
  739. (void *)(long)(event->header.size),
  740. event->header.misc,
  741. event->ip.pid,
  742. (void *)(long)ip);
  743. dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
  744. if (thread == NULL) {
  745. fprintf(stderr, "problem processing %d event, skipping it.\n",
  746. event->header.type);
  747. return -1;
  748. }
  749. if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
  750. show = SHOW_KERNEL;
  751. level = 'k';
  752. dso = kernel_dso;
  753. dprintf(" ...... dso: %s\n", dso->name);
  754. } else if (event->header.misc & PERF_EVENT_MISC_USER) {
  755. show = SHOW_USER;
  756. level = '.';
  757. map = thread__find_map(thread, ip);
  758. if (map != NULL) {
  759. ip = map->map_ip(map, ip);
  760. dso = map->dso;
  761. } else {
  762. /*
  763. * If this is outside of all known maps,
  764. * and is a negative address, try to look it
  765. * up in the kernel dso, as it might be a
  766. * vsyscall (which executes in user-mode):
  767. */
  768. if ((long long)ip < 0)
  769. dso = kernel_dso;
  770. }
  771. dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
  772. } else {
  773. show = SHOW_HV;
  774. level = 'H';
  775. dprintf(" ...... dso: [hypervisor]\n");
  776. }
  777. if (show & show_mask) {
  778. struct symbol *sym = NULL;
  779. if (dso)
  780. sym = dso->find_symbol(dso, ip);
  781. if (hist_entry__add(thread, map, dso, sym, ip, level)) {
  782. fprintf(stderr,
  783. "problem incrementing symbol count, skipping event\n");
  784. return -1;
  785. }
  786. }
  787. total++;
  788. return 0;
  789. }
  790. static int
  791. process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
  792. {
  793. struct thread *thread = threads__findnew(event->mmap.pid);
  794. struct map *map = map__new(&event->mmap);
  795. dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
  796. (void *)(offset + head),
  797. (void *)(long)(event->header.size),
  798. event->mmap.pid,
  799. (void *)(long)event->mmap.start,
  800. (void *)(long)event->mmap.len,
  801. (void *)(long)event->mmap.pgoff,
  802. event->mmap.filename);
  803. if (thread == NULL || map == NULL) {
  804. dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
  805. return 0;
  806. }
  807. thread__insert_map(thread, map);
  808. total_mmap++;
  809. return 0;
  810. }
  811. static int
  812. process_comm_event(event_t *event, unsigned long offset, unsigned long head)
  813. {
  814. struct thread *thread = threads__findnew(event->comm.pid);
  815. dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
  816. (void *)(offset + head),
  817. (void *)(long)(event->header.size),
  818. event->comm.comm, event->comm.pid);
  819. if (thread == NULL ||
  820. thread__set_comm(thread, event->comm.comm)) {
  821. dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
  822. return -1;
  823. }
  824. total_comm++;
  825. return 0;
  826. }
  827. static int
  828. process_fork_event(event_t *event, unsigned long offset, unsigned long head)
  829. {
  830. struct thread *thread = threads__findnew(event->fork.pid);
  831. struct thread *parent = threads__findnew(event->fork.ppid);
  832. dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
  833. (void *)(offset + head),
  834. (void *)(long)(event->header.size),
  835. event->fork.pid, event->fork.ppid);
  836. if (!thread || !parent || thread__fork(thread, parent)) {
  837. dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
  838. return -1;
  839. }
  840. total_fork++;
  841. return 0;
  842. }
/* Handle a PERF_EVENT_PERIOD record: dump-only, nothing is accounted. */
static int
process_period_event(event_t *event, unsigned long offset, unsigned long head)
{
	dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->period.time,
		event->period.id,
		event->period.sample_period);

	return 0;
}
  854. static int
  855. process_event(event_t *event, unsigned long offset, unsigned long head)
  856. {
  857. if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
  858. return process_overflow_event(event, offset, head);
  859. switch (event->header.type) {
  860. case PERF_EVENT_MMAP:
  861. return process_mmap_event(event, offset, head);
  862. case PERF_EVENT_COMM:
  863. return process_comm_event(event, offset, head);
  864. case PERF_EVENT_FORK:
  865. return process_fork_event(event, offset, head);
  866. case PERF_EVENT_PERIOD:
  867. return process_period_event(event, offset, head);
  868. /*
  869. * We dont process them right now but they are fine:
  870. */
  871. case PERF_EVENT_THROTTLE:
  872. case PERF_EVENT_UNTHROTTLE:
  873. return 0;
  874. default:
  875. return -1;
  876. }
  877. return 0;
  878. }
/*
 * Main report driver: stream perf.data through a sliding mmap window,
 * dispatch every record, then collapse, resort and print the
 * histogram.  Returns EXIT_SUCCESS/EXIT_FAILURE.
 */
static int __cmd_report(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;	/* file offset of the current mmap window */
	unsigned long head = 0;		/* read position within the window */
	struct stat stat;
	event_t *event;
	uint32_t size;
	char *buf;

	register_idle_thread();

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		fprintf(stderr, " failed to open file: %s", input_name);
		if (!strcmp(input_name, "perf.data"))
			fprintf(stderr, " (try 'perf record' first)");
		fprintf(stderr, "\n");
		exit(-1);
	}

	ret = fstat(input, &stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

	/* cwd == NULL disables path shortening in map__new(). */
	if (!full_paths) {
		if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
			perror("failed to get the current directory");
			return EXIT_FAILURE;
		}
		cwdlen = strlen(cwd);
	} else {
		cwd = NULL;
		cwdlen = 0;
	}
remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	/* Guard against a zero-size header so head always advances. */
	size = event->header.size;
	if (!size)
		size = 8;

	/*
	 * Record would cross the window edge: slide the window forward
	 * (page-aligned) and remap.
	 */
	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int ret;

		ret = munmap(buf, page_size * mmap_window);
		assert(ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dprintf("%p [%p]: event: %d\n",
		(void *)(offset + head),
		(void *)(long)event->header.size,
		event->header.type);

	if (!size || process_event(event, offset, head) < 0) {

		dprintf("%p [%p]: skipping unknown header type: %d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.type);

		total_unknown++;

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head < stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	dprintf("      IP events: %10ld\n", total);
	dprintf("    mmap events: %10ld\n", total_mmap);
	dprintf("    comm events: %10ld\n", total_comm);
	dprintf("    fork events: %10ld\n", total_fork);
	dprintf(" unknown events: %10ld\n", total_unknown);

	if (dump_trace)
		return 0;

	if (verbose >= 3)
		threads__fprintf(stdout);

	if (verbose >= 2)
		dsos__fprintf(stdout);

	collapse__resort();
	output__resort();
	output__fprintf(stdout, total);

	return rc;
}
/* Usage summary shown by --help and on option errors. */
static const char * const report_usage[] = {
	"perf report [<options>] <command>",
	NULL
};
  985. static const struct option options[] = {
  986. OPT_STRING('i', "input", &input_name, "file",
  987. "input file name"),
  988. OPT_BOOLEAN('v', "verbose", &verbose,
  989. "be more verbose (show symbol address, etc)"),
  990. OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
  991. "dump raw trace in ASCII"),
  992. OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
  993. OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
  994. "sort by key(s): pid, comm, dso, symbol. Default: pid,symbol"),
  995. OPT_BOOLEAN('P', "full-paths", &full_paths,
  996. "Don't shorten the pathnames taking into account the cwd"),
  997. OPT_END()
  998. };
  999. static void setup_sorting(void)
  1000. {
  1001. char *tmp, *tok, *str = strdup(sort_order);
  1002. for (tok = strtok_r(str, ", ", &tmp);
  1003. tok; tok = strtok_r(NULL, ", ", &tmp)) {
  1004. if (sort_dimension__add(tok) < 0) {
  1005. error("Unknown --sort key: `%s'", tok);
  1006. usage_with_options(report_usage, options);
  1007. }
  1008. }
  1009. free(str);
  1010. }
/*
 * Entry point for "perf report": initialize symbol handling, parse
 * options, configure sorting and the pager, then run the report.
 * The prefix argument is unused here.
 */
int cmd_report(int argc, const char **argv, const char *prefix)
{
	symbol__init();

	page_size = getpagesize();

	argc = parse_options(argc, argv, options, report_usage, 0);

	setup_sorting();

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(report_usage, options);

	setup_pager();

	return __cmd_report();
}