callchain.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Handle the callchains from the stream in an ad-hoc radix tree and then
 * sort them in an rbtree.
 *
 * Using a radix tree for the code paths provides fast retrieval and factorizes
 * memory use. It also lets us use the paths in a hierarchical graph view.
 *
 */
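
/*
 * Illustration (not part of the original source): two sampled callchains
 *
 *	main -> foo -> bar
 *	main -> foo -> baz
 *
 * share the "main -> foo" prefix, so the tree stores that prefix once, with
 * "bar" and "baz" as two children. Hits are accounted where a chain ends and
 * cumulated upwards (see callchain_cumul_hits()).
 */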

#include <inttypes.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <math.h>

#include "asm/bug.h"

#include "hist.h"
#include "util.h"
#include "sort.h"
#include "machine.h"
#include "callchain.h"
#include "branch.h"

#define CALLCHAIN_PARAM_DEFAULT			\
	.mode		= CHAIN_GRAPH_ABS,	\
	.min_percent	= 0.5,			\
	.order		= ORDER_CALLEE,		\
	.key		= CCKEY_FUNCTION,	\
	.value		= CCVAL_PERCENT,	\

struct callchain_param callchain_param = {
	CALLCHAIN_PARAM_DEFAULT
};

/*
 * Are there any events using DWARF callchains?
 *
 * I.e.
 *
 *   -e cycles/call-graph=dwarf/
 */
bool dwarf_callchain_users;

struct callchain_param callchain_param_default = {
	CALLCHAIN_PARAM_DEFAULT
};

__thread struct callchain_cursor callchain_cursor;

int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
{
	return parse_callchain_record(arg, param);
}

static int parse_callchain_mode(const char *value)
{
	if (!strncmp(value, "graph", strlen(value))) {
		callchain_param.mode = CHAIN_GRAPH_ABS;
		return 0;
	}
	if (!strncmp(value, "flat", strlen(value))) {
		callchain_param.mode = CHAIN_FLAT;
		return 0;
	}
	if (!strncmp(value, "fractal", strlen(value))) {
		callchain_param.mode = CHAIN_GRAPH_REL;
		return 0;
	}
	if (!strncmp(value, "folded", strlen(value))) {
		callchain_param.mode = CHAIN_FOLDED;
		return 0;
	}
	return -1;
}

static int parse_callchain_order(const char *value)
{
	if (!strncmp(value, "caller", strlen(value))) {
		callchain_param.order = ORDER_CALLER;
		callchain_param.order_set = true;
		return 0;
	}
	if (!strncmp(value, "callee", strlen(value))) {
		callchain_param.order = ORDER_CALLEE;
		callchain_param.order_set = true;
		return 0;
	}
	return -1;
}

static int parse_callchain_sort_key(const char *value)
{
	if (!strncmp(value, "function", strlen(value))) {
		callchain_param.key = CCKEY_FUNCTION;
		return 0;
	}
	if (!strncmp(value, "address", strlen(value))) {
		callchain_param.key = CCKEY_ADDRESS;
		return 0;
	}
	if (!strncmp(value, "srcline", strlen(value))) {
		callchain_param.key = CCKEY_SRCLINE;
		return 0;
	}
	if (!strncmp(value, "branch", strlen(value))) {
		callchain_param.branch_callstack = 1;
		return 0;
	}
	return -1;
}

static int parse_callchain_value(const char *value)
{
	if (!strncmp(value, "percent", strlen(value))) {
		callchain_param.value = CCVAL_PERCENT;
		return 0;
	}
	if (!strncmp(value, "period", strlen(value))) {
		callchain_param.value = CCVAL_PERIOD;
		return 0;
	}
	if (!strncmp(value, "count", strlen(value))) {
		callchain_param.value = CCVAL_COUNT;
		return 0;
	}
	return -1;
}

static int get_stack_size(const char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}

static int
__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
{
	char *tok;
	char *endptr, *saveptr = NULL;
	bool minpcnt_set = false;
	bool record_opt_set = false;
	bool try_stack_size = false;

	callchain_param.enabled = true;
	symbol_conf.use_callchain = true;

	if (!arg)
		return 0;

	while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
		if (!strncmp(tok, "none", strlen(tok))) {
			callchain_param.mode = CHAIN_NONE;
			callchain_param.enabled = false;
			symbol_conf.use_callchain = false;
			return 0;
		}

		if (!parse_callchain_mode(tok) ||
		    !parse_callchain_order(tok) ||
		    !parse_callchain_sort_key(tok) ||
		    !parse_callchain_value(tok)) {
			/* parsing ok - move on to the next */
			try_stack_size = false;
			goto next;
		} else if (allow_record_opt && !record_opt_set) {
			if (parse_callchain_record(tok, &callchain_param))
				goto try_numbers;

			/* assume that a number following "dwarf" is the stack size */
			if (callchain_param.record_mode == CALLCHAIN_DWARF)
				try_stack_size = true;

			record_opt_set = true;
			goto next;
		}

try_numbers:
		if (try_stack_size) {
			unsigned long size = 0;

			if (get_stack_size(tok, &size) < 0)
				return -1;
			callchain_param.dump_size = size;
			try_stack_size = false;
		} else if (!minpcnt_set) {
			/* try to get the min percent */
			callchain_param.min_percent = strtod(tok, &endptr);
			if (tok == endptr)
				return -1;
			minpcnt_set = true;
		} else {
			/* finally, try the print limit */
			callchain_param.print_limit = strtoul(tok, &endptr, 0);
			if (tok == endptr)
				return -1;
		}
next:
		arg = NULL;
	}

	if (callchain_register_param(&callchain_param) < 0) {
		pr_err("Can't register callchain params\n");
		return -1;
	}
	return 0;
}

int parse_callchain_report_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, false);
}

int parse_callchain_top_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, true);
}
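
/*
 * Illustrative example (not part of the original source): a report option
 * string such as
 *
 *	perf report --call-graph graph,0.5,caller,function,percent
 *
 * is tokenized by __parse_callchain_report_opt(): "graph" selects the print
 * mode, "0.5" the minimum percentage, "caller" the order, "function" the
 * sort key and "percent" the value type. For perf top (allow_record_opt),
 * record options like "dwarf,4096" may additionally appear in the string.
 */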

int parse_callchain_record(const char *arg, struct callchain_param *param)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				param->record_mode = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for --call-graph fp\n");
			break;

		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			param->record_mode = CALLCHAIN_DWARF;
			param->dump_size = default_stack_dump_size;
			dwarf_callchain_users = true;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				param->dump_size = size;
			}
		} else if (!strncmp(name, "lbr", sizeof("lbr"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				param->record_mode = CALLCHAIN_LBR;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for --call-graph lbr\n");
			break;
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}
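
/*
 * Illustrative record option strings accepted above (not part of the
 * original source):
 *
 *	perf record --call-graph fp ...
 *	perf record --call-graph dwarf,4096 ...	stack dump size in bytes,
 *						rounded up to a multiple of
 *						sizeof(u64) by get_stack_size()
 *	perf record --call-graph lbr ...
 */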

int perf_callchain_config(const char *var, const char *value)
{
	char *endptr;

	if (!strstarts(var, "call-graph."))
		return 0;
	var += sizeof("call-graph.") - 1;

	if (!strcmp(var, "record-mode"))
		return parse_callchain_record_opt(value, &callchain_param);
	if (!strcmp(var, "dump-size")) {
		unsigned long size = 0;
		int ret;

		ret = get_stack_size(value, &size);
		callchain_param.dump_size = size;

		return ret;
	}
	if (!strcmp(var, "print-type")) {
		int ret;
		ret = parse_callchain_mode(value);
		if (ret == -1)
			pr_err("Invalid callchain mode: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "order")) {
		int ret;
		ret = parse_callchain_order(value);
		if (ret == -1)
			pr_err("Invalid callchain order: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "sort-key")) {
		int ret;
		ret = parse_callchain_sort_key(value);
		if (ret == -1)
			pr_err("Invalid callchain sort key: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "threshold")) {
		callchain_param.min_percent = strtod(value, &endptr);
		if (value == endptr) {
			pr_err("Invalid callchain threshold: %s\n", value);
			return -1;
		}
	}
	if (!strcmp(var, "print-limit")) {
		callchain_param.print_limit = strtod(value, &endptr);
		if (value == endptr) {
			pr_err("Invalid callchain print limit: %s\n", value);
			return -1;
		}
	}
	return 0;
}
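
/*
 * Illustrative ~/.perfconfig snippet handled by perf_callchain_config()
 * (not part of the original source); the keys map to the "call-graph."
 * variables matched above:
 *
 *	[call-graph]
 *		record-mode = dwarf
 *		dump-size = 4096
 *		print-type = graph
 *		order = callee
 *		sort-key = function
 *		threshold = 0.5
 */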

static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
		    enum chain_mode mode)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct callchain_node *rnode;
	u64 chain_cumul = callchain_cumul_hits(chain);

	while (*p) {
		u64 rnode_cumul;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node);
		rnode_cumul = callchain_cumul_hits(rnode);

		switch (mode) {
		case CHAIN_FLAT:
		case CHAIN_FOLDED:
			if (rnode->hit < chain->hit)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_GRAPH_ABS: /* Fall through */
		case CHAIN_GRAPH_REL:
			if (rnode_cumul < chain_cumul)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_NONE:
		default:
			break;
		}
	}

	rb_link_node(&chain->rb_node, parent, p);
	rb_insert_color(&chain->rb_node, root);
}

static void
__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
		  u64 min_hit)
{
	struct rb_node *n;
	struct callchain_node *child;

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		__sort_chain_flat(rb_root, child, min_hit);
	}

	if (node->hit && node->hit >= min_hit)
		rb_insert_callchain(rb_root, node, CHAIN_FLAT);
}

/*
 * Once we have all the callchains from the stream, we can now
 * sort them by hit
 */
static void
sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
		u64 min_hit, struct callchain_param *param __maybe_unused)
{
	*rb_root = RB_ROOT;
	__sort_chain_flat(rb_root, &root->node, min_hit);
}

static void __sort_chain_graph_abs(struct callchain_node *node,
				   u64 min_hit)
{
	struct rb_node *n;
	struct callchain_node *child;

	node->rb_root = RB_ROOT;
	n = rb_first(&node->rb_root_in);

	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		__sort_chain_graph_abs(child, min_hit);
		if (callchain_cumul_hits(child) >= min_hit)
			rb_insert_callchain(&node->rb_root, child,
					    CHAIN_GRAPH_ABS);
	}
}

static void
sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit, struct callchain_param *param __maybe_unused)
{
	__sort_chain_graph_abs(&chain_root->node, min_hit);
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}

static void __sort_chain_graph_rel(struct callchain_node *node,
				   double min_percent)
{
	struct rb_node *n;
	struct callchain_node *child;
	u64 min_hit;

	node->rb_root = RB_ROOT;
	min_hit = ceil(node->children_hit * min_percent);

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		__sort_chain_graph_rel(child, min_percent);
		if (callchain_cumul_hits(child) >= min_hit)
			rb_insert_callchain(&node->rb_root, child,
					    CHAIN_GRAPH_REL);
	}
}

static void
sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit __maybe_unused, struct callchain_param *param)
{
	__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}

int callchain_register_param(struct callchain_param *param)
{
	switch (param->mode) {
	case CHAIN_GRAPH_ABS:
		param->sort = sort_chain_graph_abs;
		break;
	case CHAIN_GRAPH_REL:
		param->sort = sort_chain_graph_rel;
		break;
	case CHAIN_FLAT:
	case CHAIN_FOLDED:
		param->sort = sort_chain_flat;
		break;
	case CHAIN_NONE:
	default:
		return -1;
	}
	return 0;
}

/*
 * Create a child for a parent. If inherit_children, then the new child
 * will become the new parent of its parent's children
 */
static struct callchain_node *
create_child(struct callchain_node *parent, bool inherit_children)
{
	struct callchain_node *new;

	new = zalloc(sizeof(*new));
	if (!new) {
		perror("not enough memory to create child for code path tree");
		return NULL;
	}
	new->parent = parent;
	INIT_LIST_HEAD(&new->val);
	INIT_LIST_HEAD(&new->parent_val);

	if (inherit_children) {
		struct rb_node *n;
		struct callchain_node *child;

		new->rb_root_in = parent->rb_root_in;
		parent->rb_root_in = RB_ROOT;

		n = rb_first(&new->rb_root_in);
		while (n) {
			child = rb_entry(n, struct callchain_node, rb_node_in);
			child->parent = new;
			n = rb_next(n);
		}

		/* make it the first child */
		rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
	}

	return new;
}

/*
 * Fill the node with callchain values
 */
static int
fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
{
	struct callchain_cursor_node *cursor_node;

	node->val_nr = cursor->nr - cursor->pos;
	if (!node->val_nr)
		pr_warning("Warning: empty node in callchain tree\n");

	cursor_node = callchain_cursor_current(cursor);

	while (cursor_node) {
		struct callchain_list *call;

		call = zalloc(sizeof(*call));
		if (!call) {
			perror("not enough memory for the code path tree");
			return -1;
		}
		call->ip = cursor_node->ip;
		call->ms.sym = cursor_node->sym;
		call->ms.map = map__get(cursor_node->map);
		call->srcline = cursor_node->srcline;

		if (cursor_node->branch) {
			call->branch_count = 1;

			if (cursor_node->branch_from) {
				/*
				 * branch_from is set elsewhere, which implies
				 * this is the "to" side of a branch.
				 */
				call->brtype_stat.branch_to = true;

				if (cursor_node->branch_flags.predicted)
					call->predicted_count = 1;

				if (cursor_node->branch_flags.abort)
					call->abort_count = 1;

				branch_type_count(&call->brtype_stat,
						  &cursor_node->branch_flags,
						  cursor_node->branch_from,
						  cursor_node->ip);
			} else {
				/*
				 * It's the "from" side of a branch
				 */
				call->brtype_stat.branch_to = false;
				call->cycles_count =
					cursor_node->branch_flags.cycles;
				call->iter_count = cursor_node->nr_loop_iter;
				call->iter_cycles = cursor_node->iter_cycles;
			}
		}

		list_add_tail(&call->list, &node->val);

		callchain_cursor_advance(cursor);
		cursor_node = callchain_cursor_current(cursor);
	}
	return 0;
}

static struct callchain_node *
add_child(struct callchain_node *parent,
	  struct callchain_cursor *cursor,
	  u64 period)
{
	struct callchain_node *new;

	new = create_child(parent, false);
	if (new == NULL)
		return NULL;

	if (fill_node(new, cursor) < 0) {
		struct callchain_list *call, *tmp;

		list_for_each_entry_safe(call, tmp, &new->val, list) {
			list_del(&call->list);
			map__zput(call->ms.map);
			free(call);
		}
		free(new);
		return NULL;
	}

	new->children_hit = 0;
	new->hit = period;
	new->children_count = 0;
	new->count = 1;
	return new;
}

enum match_result {
	MATCH_ERROR = -1,
	MATCH_EQ,
	MATCH_LT,
	MATCH_GT,
};

static enum match_result match_chain_strings(const char *left,
					     const char *right)
{
	enum match_result ret = MATCH_EQ;
	int cmp;

	if (left && right)
		cmp = strcmp(left, right);
	else if (!left && right)
		cmp = 1;
	else if (left && !right)
		cmp = -1;
	else
		return MATCH_ERROR;

	if (cmp != 0)
		ret = cmp < 0 ? MATCH_LT : MATCH_GT;

	return ret;
}

/*
 * We need to always use relative addresses because we're aggregating
 * callchains from multiple threads, i.e. different address spaces, so
 * comparing absolute addresses makes no sense: a symbol in a DSO may end up
 * at a different address when used in a different binary, or even in the
 * same binary with some sort of address randomization technique, thus we
 * need to compare just relative addresses. -acme
 */
static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
						   struct map *right_map, u64 right_ip)
{
	struct dso *left_dso = left_map ? left_map->dso : NULL;
	struct dso *right_dso = right_map ? right_map->dso : NULL;

	if (left_dso != right_dso)
		return left_dso < right_dso ? MATCH_LT : MATCH_GT;

	if (left_ip != right_ip)
		return left_ip < right_ip ? MATCH_LT : MATCH_GT;

	return MATCH_EQ;
}

static enum match_result match_chain(struct callchain_cursor_node *node,
				     struct callchain_list *cnode)
{
	enum match_result match = MATCH_ERROR;

	switch (callchain_param.key) {
	case CCKEY_SRCLINE:
		match = match_chain_strings(cnode->srcline, node->srcline);
		if (match != MATCH_ERROR)
			break;
		/* otherwise fall back to symbol-based comparison below */
		__fallthrough;
	case CCKEY_FUNCTION:
		if (node->sym && cnode->ms.sym) {
			/*
			 * Compare inlined frames based on their symbol name
			 * because different inlined frames will have the same
			 * symbol start. Otherwise do a faster comparison based
			 * on the symbol start address.
			 */
			if (cnode->ms.sym->inlined || node->sym->inlined) {
				match = match_chain_strings(cnode->ms.sym->name,
							    node->sym->name);
				if (match != MATCH_ERROR)
					break;
			} else {
				match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
								  node->map, node->sym->start);
				break;
			}
		}
		/* otherwise fall back to IP-based comparison below */
		__fallthrough;
	case CCKEY_ADDRESS:
	default:
		match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip);
		break;
	}

	if (match == MATCH_EQ && node->branch) {
		cnode->branch_count++;

		if (node->branch_from) {
			/*
			 * It's the "to" side of a branch
			 */
			cnode->brtype_stat.branch_to = true;

			if (node->branch_flags.predicted)
				cnode->predicted_count++;

			if (node->branch_flags.abort)
				cnode->abort_count++;

			branch_type_count(&cnode->brtype_stat,
					  &node->branch_flags,
					  node->branch_from,
					  node->ip);
		} else {
			/*
			 * It's the "from" side of a branch
			 */
			cnode->brtype_stat.branch_to = false;
			cnode->cycles_count += node->branch_flags.cycles;
			cnode->iter_count += node->nr_loop_iter;
			cnode->iter_cycles += node->iter_cycles;
		}
	}

	return match;
}

/*
 * Split the parent in two parts (a new child is created) and
 * give a part of its callchain to the created child.
 * Then create another child to host the given callchain of the new branch
 */
static int
split_add_child(struct callchain_node *parent,
		struct callchain_cursor *cursor,
		struct callchain_list *to_split,
		u64 idx_parents, u64 idx_local, u64 period)
{
	struct callchain_node *new;
	struct list_head *old_tail;
	unsigned int idx_total = idx_parents + idx_local;

	/* split */
	new = create_child(parent, true);
	if (new == NULL)
		return -1;

	/* split the callchain and move a part to the new child */
	old_tail = parent->val.prev;
	list_del_range(&to_split->list, old_tail);
	new->val.next = &to_split->list;
	new->val.prev = old_tail;
	to_split->list.prev = &new->val;
	old_tail->next = &new->val;

	/* split the hits */
	new->hit = parent->hit;
	new->children_hit = parent->children_hit;
	parent->children_hit = callchain_cumul_hits(new);
	new->val_nr = parent->val_nr - idx_local;
	parent->val_nr = idx_local;
	new->count = parent->count;
	new->children_count = parent->children_count;
	parent->children_count = callchain_cumul_counts(new);

	/* create a new child for the new branch if any */
	if (idx_total < cursor->nr) {
		struct callchain_node *first;
		struct callchain_list *cnode;
		struct callchain_cursor_node *node;
		struct rb_node *p, **pp;

		parent->hit = 0;
		parent->children_hit += period;
		parent->count = 0;
		parent->children_count += 1;

		node = callchain_cursor_current(cursor);
		new = add_child(parent, cursor, period);
		if (new == NULL)
			return -1;

		/*
		 * This is the second child since we moved the parent's
		 * children to the new (first) child above.
		 */
		p = parent->rb_root_in.rb_node;
		first = rb_entry(p, struct callchain_node, rb_node_in);
		cnode = list_first_entry(&first->val, struct callchain_list,
					 list);

		if (match_chain(node, cnode) == MATCH_LT)
			pp = &p->rb_left;
		else
			pp = &p->rb_right;

		rb_link_node(&new->rb_node_in, p, pp);
		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
	} else {
		parent->hit = period;
		parent->count = 1;
	}
	return 0;
}
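
/*
 * Illustration (not part of the original source): appending the chain
 * "main -> foo -> baz" to a node whose values are [main, foo, bar] matches
 * only the [main, foo] prefix, so split_add_child() splits the node there:
 *
 *	[main foo bar]    becomes    [main foo]
 *	                              /       \
 *	                          [bar]      [baz]
 *
 * The first child inherits the original node's children, and the remainder
 * of the new chain is added as a second child.
 */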

static enum match_result
append_chain(struct callchain_node *root,
	     struct callchain_cursor *cursor,
	     u64 period);

static int
append_chain_children(struct callchain_node *root,
		      struct callchain_cursor *cursor,
		      u64 period)
{
	struct callchain_node *rnode;
	struct callchain_cursor_node *node;
	struct rb_node **p = &root->rb_root_in.rb_node;
	struct rb_node *parent = NULL;

	node = callchain_cursor_current(cursor);
	if (!node)
		return -1;

	/* lookup in children */
	while (*p) {
		enum match_result ret;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node_in);

		/* If at least the first entry matches, rely on that child */
		ret = append_chain(rnode, cursor, period);
		if (ret == MATCH_EQ)
			goto inc_children_hit;
		if (ret == MATCH_ERROR)
			return -1;

		if (ret == MATCH_LT)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	/* nothing in children, add to the current node */
	rnode = add_child(root, cursor, period);
	if (rnode == NULL)
		return -1;

	rb_link_node(&rnode->rb_node_in, parent, p);
	rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);

inc_children_hit:
	root->children_hit += period;
	root->children_count++;
	return 0;
}

static enum match_result
append_chain(struct callchain_node *root,
	     struct callchain_cursor *cursor,
	     u64 period)
{
	struct callchain_list *cnode;
	u64 start = cursor->pos;
	bool found = false;
	u64 matches;
	enum match_result cmp = MATCH_ERROR;

	/*
	 * Lookup in the current node
	 * If we have a symbol, then compare the start to match
	 * anywhere inside a function, unless function
	 * mode is disabled.
	 */
	list_for_each_entry(cnode, &root->val, list) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (!node)
			break;

		cmp = match_chain(node, cnode);
		if (cmp != MATCH_EQ)
			break;

		found = true;

		callchain_cursor_advance(cursor);
	}

	/* no match: relay to the parent */
	if (!found) {
		WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
		return cmp;
	}

	matches = cursor->pos - start;

	/* we match only a part of the node. Split it and add the new chain */
	if (matches < root->val_nr) {
		if (split_add_child(root, cursor, cnode, start, matches,
				    period) < 0)
			return MATCH_ERROR;

		return MATCH_EQ;
	}

	/* we match 100% of the path, increment the hit */
	if (matches == root->val_nr && cursor->pos == cursor->nr) {
		root->hit += period;
		root->count++;
		return MATCH_EQ;
	}

	/* We match the node and still have a part remaining */
	if (append_chain_children(root, cursor, period) < 0)
		return MATCH_ERROR;

	return MATCH_EQ;
}

int callchain_append(struct callchain_root *root,
		     struct callchain_cursor *cursor,
		     u64 period)
{
	if (!cursor->nr)
		return 0;

	callchain_cursor_commit(cursor);

	if (append_chain_children(&root->node, cursor, period) < 0)
		return -1;

	if (cursor->nr > root->max_depth)
		root->max_depth = cursor->nr;

	return 0;
}
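
/*
 * Minimal usage sketch (illustrative only; ip, map, sym, srcline, root and
 * sample are placeholders, error handling omitted): resolve a sample's
 * frames into the thread-local cursor, then fold them into a callchain_root.
 * callchain_append() commits the cursor itself before walking it.
 *
 *	callchain_cursor_reset(&callchain_cursor);
 *	callchain_cursor_append(&callchain_cursor, ip, map, sym,
 *				false, NULL, 0, 0, 0, srcline);
 *	callchain_append(&root, &callchain_cursor, sample->period);
 */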

static int
merge_chain_branch(struct callchain_cursor *cursor,
		   struct callchain_node *dst, struct callchain_node *src)
{
	struct callchain_cursor_node **old_last = cursor->last;
	struct callchain_node *child;
	struct callchain_list *list, *next_list;
	struct rb_node *n;
	int old_pos = cursor->nr;
	int err = 0;

	list_for_each_entry_safe(list, next_list, &src->val, list) {
		callchain_cursor_append(cursor, list->ip,
					list->ms.map, list->ms.sym,
					false, NULL, 0, 0, 0, list->srcline);
		list_del(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	if (src->hit) {
		callchain_cursor_commit(cursor);
		if (append_chain_children(dst, cursor, src->hit) < 0)
			return -1;
	}

	n = rb_first(&src->rb_root_in);
	while (n) {
		child = container_of(n, struct callchain_node, rb_node_in);
		n = rb_next(n);
		rb_erase(&child->rb_node_in, &src->rb_root_in);

		err = merge_chain_branch(cursor, dst, child);
		if (err)
			break;

		free(child);
	}

	cursor->nr = old_pos;
	cursor->last = old_last;

	return err;
}

int callchain_merge(struct callchain_cursor *cursor,
		    struct callchain_root *dst, struct callchain_root *src)
{
	return merge_chain_branch(cursor, &dst->node, &src->node);
}

int callchain_cursor_append(struct callchain_cursor *cursor,
			    u64 ip, struct map *map, struct symbol *sym,
			    bool branch, struct branch_flags *flags,
			    int nr_loop_iter, u64 iter_cycles, u64 branch_from,
			    const char *srcline)
{
	struct callchain_cursor_node *node = *cursor->last;

	if (!node) {
		node = calloc(1, sizeof(*node));
		if (!node)
			return -ENOMEM;

		*cursor->last = node;
	}

	node->ip = ip;
	map__zput(node->map);
	node->map = map__get(map);
	node->sym = sym;
	node->branch = branch;
	node->nr_loop_iter = nr_loop_iter;
	node->iter_cycles = iter_cycles;
	node->srcline = srcline;

	if (flags)
		memcpy(&node->branch_flags, flags,
		       sizeof(struct branch_flags));

	node->branch_from = branch_from;
	cursor->nr++;

	cursor->last = &node->next;

	return 0;
}

int sample__resolve_callchain(struct perf_sample *sample,
			      struct callchain_cursor *cursor, struct symbol **parent,
			      struct perf_evsel *evsel, struct addr_location *al,
			      int max_stack)
{
	if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
		return 0;

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
	    perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
		return thread__resolve_callchain(al->thread, cursor, evsel, sample,
						 parent, al, max_stack);
	}
	return 0;
}

int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
{
	if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
	    !symbol_conf.show_branchflag_count)
		return 0;
	return callchain_append(he->callchain, &callchain_cursor, sample->period);
}

int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
			bool hide_unresolved)
{
	al->map = node->map;
	al->sym = node->sym;
	al->srcline = node->srcline;
	al->addr = node->ip;

	if (al->sym == NULL) {
		if (hide_unresolved)
			return 0;
		if (al->map == NULL)
			goto out;
	}

	if (al->map->groups == &al->machine->kmaps) {
		if (machine__is_host(al->machine)) {
			al->cpumode = PERF_RECORD_MISC_KERNEL;
			al->level = 'k';
		} else {
			al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
			al->level = 'g';
		}
	} else {
		if (machine__is_host(al->machine)) {
			al->cpumode = PERF_RECORD_MISC_USER;
			al->level = '.';
		} else if (perf_guest) {
			al->cpumode = PERF_RECORD_MISC_GUEST_USER;
			al->level = 'u';
		} else {
			al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
			al->level = 'H';
		}
	}

out:
	return 1;
}

char *callchain_list__sym_name(struct callchain_list *cl,
			       char *bf, size_t bfsize, bool show_dso)
{
	bool show_addr = callchain_param.key == CCKEY_ADDRESS;
	bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
	int printed;

	if (cl->ms.sym) {
		const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";

		if (show_srcline && cl->srcline)
			printed = scnprintf(bf, bfsize, "%s %s%s",
					    cl->ms.sym->name, cl->srcline,
					    inlined);
		else
			printed = scnprintf(bf, bfsize, "%s%s",
					    cl->ms.sym->name, inlined);
	} else
		printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);

	if (show_dso)
		scnprintf(bf + printed, bfsize - printed, " %s",
			  cl->ms.map ?
			  cl->ms.map->dso->short_name :
			  "unknown");

	return bf;
}

char *callchain_node__scnprintf_value(struct callchain_node *node,
				      char *bf, size_t bfsize, u64 total)
{
	double percent = 0.0;
	u64 period = callchain_cumul_hits(node);
	unsigned count = callchain_cumul_counts(node);

	if (callchain_param.mode == CHAIN_FOLDED) {
		period = node->hit;
		count = node->count;
	}

	switch (callchain_param.value) {
	case CCVAL_PERIOD:
		scnprintf(bf, bfsize, "%"PRIu64, period);
		break;
	case CCVAL_COUNT:
		scnprintf(bf, bfsize, "%u", count);
		break;
	case CCVAL_PERCENT:
	default:
		if (total)
			percent = period * 100.0 / total;
		scnprintf(bf, bfsize, "%.2f%%", percent);
		break;
	}
	return bf;
}

int callchain_node__fprintf_value(struct callchain_node *node,
				  FILE *fp, u64 total)
{
	double percent = 0.0;
	u64 period = callchain_cumul_hits(node);
	unsigned count = callchain_cumul_counts(node);

	if (callchain_param.mode == CHAIN_FOLDED) {
		period = node->hit;
		count = node->count;
	}

	switch (callchain_param.value) {
	case CCVAL_PERIOD:
		return fprintf(fp, "%"PRIu64, period);
	case CCVAL_COUNT:
		return fprintf(fp, "%u", count);
	case CCVAL_PERCENT:
	default:
		if (total)
			percent = period * 100.0 / total;
		return percent_color_fprintf(fp, "%.2f%%", percent);
	}
	return 0;
}

static void callchain_counts_value(struct callchain_node *node,
				   u64 *branch_count, u64 *predicted_count,
				   u64 *abort_count, u64 *cycles_count)
{
	struct callchain_list *clist;

	list_for_each_entry(clist, &node->val, list) {
		if (branch_count)
			*branch_count += clist->branch_count;

		if (predicted_count)
			*predicted_count += clist->predicted_count;

		if (abort_count)
			*abort_count += clist->abort_count;

		if (cycles_count)
			*cycles_count += clist->cycles_count;
	}
}

static int callchain_node_branch_counts_cumul(struct callchain_node *node,
					      u64 *branch_count,
					      u64 *predicted_count,
					      u64 *abort_count,
					      u64 *cycles_count)
{
	struct callchain_node *child;
	struct rb_node *n;

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		callchain_node_branch_counts_cumul(child, branch_count,
						   predicted_count,
						   abort_count,
						   cycles_count);

		callchain_counts_value(child, branch_count,
				       predicted_count, abort_count,
				       cycles_count);
	}

	return 0;
}

int callchain_branch_counts(struct callchain_root *root,
			    u64 *branch_count, u64 *predicted_count,
			    u64 *abort_count, u64 *cycles_count)
{
	if (branch_count)
		*branch_count = 0;

	if (predicted_count)
		*predicted_count = 0;

	if (abort_count)
		*abort_count = 0;

	if (cycles_count)
		*cycles_count = 0;

	return callchain_node_branch_counts_cumul(&root->node,
						  branch_count,
						  predicted_count,
						  abort_count,
						  cycles_count);
}

static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
{
	int printed;

	printed = scnprintf(bf, bfsize, "%s%s:%" PRId64 "", (idx) ? " " : " (", str, value);

	return printed;
}

static int count_float_printf(int idx, const char *str, float value,
			      char *bf, int bfsize, float threshold)
{
	int printed;

	if (threshold != 0.0 && value < threshold)
		return 0;

	printed = scnprintf(bf, bfsize, "%s%s:%.1f%%", (idx) ? " " : " (", str, value);

	return printed;
}

static int branch_to_str(char *bf, int bfsize,
			 u64 branch_count, u64 predicted_count,
			 u64 abort_count,
			 struct branch_type_stat *brtype_stat)
{
	int printed, i = 0;

	printed = branch_type_str(brtype_stat, bf, bfsize);
	if (printed)
		i++;

	if (predicted_count < branch_count) {
		printed += count_float_printf(i++, "predicted",
				predicted_count * 100.0 / branch_count,
				bf + printed, bfsize - printed, 0.0);
	}

	if (abort_count) {
		printed += count_float_printf(i++, "abort",
				abort_count * 100.0 / branch_count,
				bf + printed, bfsize - printed, 0.1);
	}

	if (i)
		printed += scnprintf(bf + printed, bfsize - printed, ")");

	return printed;
}

static int branch_from_str(char *bf, int bfsize,
			   u64 branch_count,
			   u64 cycles_count, u64 iter_count,
			   u64 iter_cycles)
{
	int printed = 0, i = 0;
	u64 cycles;

	cycles = cycles_count / branch_count;
	if (cycles) {
		printed += count_pri64_printf(i++, "cycles",
				cycles,
				bf + printed, bfsize - printed);
	}

	if (iter_count) {
		printed += count_pri64_printf(i++, "iter",
				iter_count,
				bf + printed, bfsize - printed);

		printed += count_pri64_printf(i++, "avg_cycles",
				iter_cycles / iter_count,
				bf + printed, bfsize - printed);
	}

	if (i)
		printed += scnprintf(bf + printed, bfsize - printed, ")");

	return printed;
}

static int counts_str_build(char *bf, int bfsize,
			    u64 branch_count, u64 predicted_count,
			    u64 abort_count, u64 cycles_count,
			    u64 iter_count, u64 iter_cycles,
			    struct branch_type_stat *brtype_stat)
{
	int printed;

	if (branch_count == 0)
		return scnprintf(bf, bfsize, " (calltrace)");

	if (brtype_stat->branch_to) {
		printed = branch_to_str(bf, bfsize, branch_count,
				predicted_count, abort_count, brtype_stat);
	} else {
		printed = branch_from_str(bf, bfsize, branch_count,
				cycles_count, iter_count, iter_cycles);
	}

	if (!printed)
		bf[0] = 0;

	return printed;
}

static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
				   u64 branch_count, u64 predicted_count,
				   u64 abort_count, u64 cycles_count,
				   u64 iter_count, u64 iter_cycles,
				   struct branch_type_stat *brtype_stat)
{
	char str[256];

	counts_str_build(str, sizeof(str), branch_count,
			 predicted_count, abort_count, cycles_count,
			 iter_count, iter_cycles, brtype_stat);

	if (fp)
		return fprintf(fp, "%s", str);

	return scnprintf(bf, bfsize, "%s", str);
}

int callchain_list_counts__printf_value(struct callchain_list *clist,
					FILE *fp, char *bf, int bfsize)
{
	u64 branch_count, predicted_count;
	u64 abort_count, cycles_count;
	u64 iter_count, iter_cycles;

	branch_count = clist->branch_count;
	predicted_count = clist->predicted_count;
	abort_count = clist->abort_count;
	cycles_count = clist->cycles_count;
	iter_count = clist->iter_count;
	iter_cycles = clist->iter_cycles;

	return callchain_counts_printf(fp, bf, bfsize, branch_count,
				       predicted_count, abort_count,
				       cycles_count, iter_count, iter_cycles,
				       &clist->brtype_stat);
}

static void free_callchain_node(struct callchain_node *node)
{
	struct callchain_list *list, *tmp;
	struct callchain_node *child;
	struct rb_node *n;

	list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
		list_del(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	list_for_each_entry_safe(list, tmp, &node->val, list) {
		list_del(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = container_of(n, struct callchain_node, rb_node_in);
		n = rb_next(n);
		rb_erase(&child->rb_node_in, &node->rb_root_in);

		free_callchain_node(child);
		free(child);
	}
}

void free_callchain(struct callchain_root *root)
{
	if (!symbol_conf.use_callchain)
		return;

	free_callchain_node(&root->node);
}

static u64 decay_callchain_node(struct callchain_node *node)
{
	struct callchain_node *child;
	struct rb_node *n;
	u64 child_hits = 0;

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = container_of(n, struct callchain_node, rb_node_in);

		child_hits += decay_callchain_node(child);
		n = rb_next(n);
	}

	node->hit = (node->hit * 7) / 8;
	node->children_hit = child_hits;

	return node->hit;
}

void decay_callchain(struct callchain_root *root)
{
	if (!symbol_conf.use_callchain)
		return;

	decay_callchain_node(&root->node);
}

int callchain_node__make_parent_list(struct callchain_node *node)
{
	struct callchain_node *parent = node->parent;
	struct callchain_list *chain, *new;
	LIST_HEAD(head);

	while (parent) {
		list_for_each_entry_reverse(chain, &parent->val, list) {
			new = malloc(sizeof(*new));
			if (new == NULL)
				goto out;
			*new = *chain;
			new->has_children = false;
			map__get(new->ms.map);
			list_add_tail(&new->list, &head);
		}
		parent = parent->parent;
	}

	list_for_each_entry_safe_reverse(chain, new, &head, list)
		list_move_tail(&chain->list, &node->parent_val);

	if (!list_empty(&node->parent_val)) {
		chain = list_first_entry(&node->parent_val, struct callchain_list, list);
		chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);

		chain = list_first_entry(&node->val, struct callchain_list, list);
		chain->has_children = false;
	}
	return 0;

out:
	list_for_each_entry_safe(chain, new, &head, list) {
		list_del(&chain->list);
		map__zput(chain->ms.map);
		free(chain);
	}
	return -ENOMEM;
}

int callchain_cursor__copy(struct callchain_cursor *dst,
			   struct callchain_cursor *src)
{
	int rc = 0;

	callchain_cursor_reset(dst);
	callchain_cursor_commit(src);

	while (true) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(src);
		if (node == NULL)
			break;

		rc = callchain_cursor_append(dst, node->ip, node->map, node->sym,
					     node->branch, &node->branch_flags,
					     node->nr_loop_iter,
					     node->iter_cycles,
					     node->branch_from, node->srcline);
		if (rc)
			break;

		callchain_cursor_advance(src);
	}

	return rc;
}