/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/rbtree.h>
#include <linux/list.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "thread-stack.h"

#define CALL_PATH_BLOCK_SHIFT 8
#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT)
#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1)
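
/*
 * Call paths are allocated in blocks of CALL_PATH_BLOCK_SIZE entries, so a
 * new allocation is only needed when the current block is full.
 */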
struct call_path_block {
	struct call_path cp[CALL_PATH_BLOCK_SIZE];
	struct list_head node;
};

/**
 * struct call_path_root - root of all call paths.
 * @call_path: root call path
 * @blocks: list of blocks to store call paths
 * @next: next free space
 * @sz: number of spaces
 */
struct call_path_root {
	struct call_path call_path;
	struct list_head blocks;
	size_t next;
	size_t sz;
};

/**
 * struct call_return_processor - provides a call-back to consume call-return
 *                                information.
 * @cpr: call path root
 * @process: call-back that accepts call/return information
 * @data: anonymous data for call-back
 */
struct call_return_processor {
	struct call_path_root *cpr;
	int (*process)(struct call_return *cr, void *data);
	void *data;
};

#define STACK_GROWTH 2048

/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @cp: call path
 * @no_call: a 'call' was not seen
 */
struct thread_stack_entry {
	u64 ret_addr;
	u64 timestamp;
	u64 ref;
	u64 branch_count;
	struct call_path *cp;
	bool no_call;
};

/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @comm: current comm
 */
struct thread_stack {
	struct thread_stack_entry *stack;
	size_t cnt;
	size_t sz;
	u64 trace_nr;
	u64 branch_count;
	u64 kernel_start;
	u64 last_time;
	struct call_return_processor *crp;
	struct comm *comm;
};
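
/*
 * The thread stack is used in two ways: thread_stack__event() records only
 * return addresses so that thread_stack__sample() can synthesize call chains,
 * while thread_stack__process() additionally tracks call paths and reports
 * call/return pairs through a call_return_processor.
 */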
static int thread_stack__grow(struct thread_stack *ts)
{
	struct thread_stack_entry *new_stack;
	size_t sz, new_sz;

	new_sz = ts->sz + STACK_GROWTH;
	sz = new_sz * sizeof(struct thread_stack_entry);

	new_stack = realloc(ts->stack, sz);
	if (!new_stack)
		return -ENOMEM;

	ts->stack = new_stack;
	ts->sz = new_sz;

	return 0;
}

static struct thread_stack *thread_stack__new(struct thread *thread,
					      struct call_return_processor *crp)
{
	struct thread_stack *ts;

	ts = zalloc(sizeof(struct thread_stack));
	if (!ts)
		return NULL;

	if (thread_stack__grow(ts)) {
		free(ts);
		return NULL;
	}

	if (thread->mg && thread->mg->machine)
		ts->kernel_start = machine__kernel_start(thread->mg->machine);
	else
		ts->kernel_start = 1ULL << 63;
	ts->crp = crp;

	return ts;
}

static int thread_stack__push(struct thread_stack *ts, u64 ret_addr)
{
	int err = 0;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err) {
			pr_warning("Out of memory: discarding thread stack\n");
			ts->cnt = 0;
		}
	}

	ts->stack[ts->cnt++].ret_addr = ret_addr;

	return err;
}

static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
	size_t i;

	/*
	 * In some cases there may be functions which are not seen to return.
	 * For example when setjmp / longjmp has been used. Or the perf context
	 * switch in the kernel which doesn't stop and start tracing in exactly
	 * the same code path. When that happens the return address will be
	 * further down the stack. If the return address is not found at all,
	 * we assume the opposite (i.e. this is a return for a call that wasn't
	 * seen for some reason) and leave the stack alone.
	 */
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].ret_addr == ret_addr) {
			ts->cnt = i;
			return;
		}
	}
}

static bool thread_stack__in_kernel(struct thread_stack *ts)
{
	if (!ts->cnt)
		return false;

	return ts->stack[ts->cnt - 1].cp->in_kernel;
}
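
/* Report the call/return pair for stack entry 'idx' via the call-back */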
static int thread_stack__call_return(struct thread *thread,
				     struct thread_stack *ts, size_t idx,
				     u64 timestamp, u64 ref, bool no_return)
{
	struct call_return_processor *crp = ts->crp;
	struct thread_stack_entry *tse;
	struct call_return cr = {
		.thread = thread,
		.comm = ts->comm,
		.db_id = 0,
	};

	tse = &ts->stack[idx];
	cr.cp = tse->cp;
	cr.call_time = tse->timestamp;
	cr.return_time = timestamp;
	cr.branch_count = ts->branch_count - tse->branch_count;
	cr.call_ref = tse->ref;
	cr.return_ref = ref;
	if (tse->no_call)
		cr.flags |= CALL_RETURN_NO_CALL;
	if (no_return)
		cr.flags |= CALL_RETURN_NO_RETURN;

	return crp->process(&cr, crp->data);
}

static int thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
	struct call_return_processor *crp = ts->crp;
	int err;

	if (!crp) {
		ts->cnt = 0;
		return 0;
	}

	while (ts->cnt) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						ts->last_time, 0, true);
		if (err) {
			pr_err("Error flushing thread stack!\n");
			ts->cnt = 0;
			return err;
		}
	}

	return 0;
}
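
/*
 * Update the stack for a call or return branch. This is the lightweight path
 * used for call chain synthesis; it records only return addresses and does
 * nothing more when a call_return_processor is in use.
 */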
int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
			u64 to_ip, u16 insn_len, u64 trace_nr)
{
	if (!thread)
		return -EINVAL;

	if (!thread->ts) {
		thread->ts = thread_stack__new(thread, NULL);
		if (!thread->ts) {
			pr_warning("Out of memory: no thread stack\n");
			return -ENOMEM;
		}
		thread->ts->trace_nr = trace_nr;
	}

	/*
	 * When the trace is discontinuous, the trace_nr changes. In that case
	 * the stack might be completely invalid. Better to report nothing than
	 * to report something misleading, so flush the stack.
	 */
	if (trace_nr != thread->ts->trace_nr) {
		if (thread->ts->trace_nr)
			thread_stack__flush(thread, thread->ts);
		thread->ts->trace_nr = trace_nr;
	}

	/* Stop here if thread_stack__process() is in use */
	if (thread->ts->crp)
		return 0;

	if (flags & PERF_IP_FLAG_CALL) {
		u64 ret_addr;

		if (!to_ip)
			return 0;
		ret_addr = from_ip + insn_len;
		if (ret_addr == to_ip)
			return 0; /* Zero-length calls are excluded */
		return thread_stack__push(thread->ts, ret_addr);
	} else if (flags & PERF_IP_FLAG_RETURN) {
		if (!from_ip)
			return 0;
		thread_stack__pop(thread->ts, to_ip);
	}

	return 0;
}

void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
{
	if (!thread || !thread->ts)
		return;

	if (trace_nr != thread->ts->trace_nr) {
		if (thread->ts->trace_nr)
			thread_stack__flush(thread, thread->ts);
		thread->ts->trace_nr = trace_nr;
	}
}

void thread_stack__free(struct thread *thread)
{
	if (thread->ts) {
		thread_stack__flush(thread, thread->ts);
		zfree(&thread->ts->stack);
		zfree(&thread->ts);
	}
}
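
/*
 * Synthesize a call chain: the sample ip followed by the saved return
 * addresses from the top of the stack downwards, limited to sz entries.
 */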
void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
			  size_t sz, u64 ip)
{
	size_t i;

	if (!thread || !thread->ts)
		chain->nr = 1;
	else
		chain->nr = min(sz, thread->ts->cnt + 1);

	chain->ips[0] = ip;

	for (i = 1; i < chain->nr; i++)
		chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
}

static void call_path__init(struct call_path *cp, struct call_path *parent,
			    struct symbol *sym, u64 ip, bool in_kernel)
{
	cp->parent = parent;
	cp->sym = sym;
	cp->ip = sym ? 0 : ip;
	cp->db_id = 0;
	cp->in_kernel = in_kernel;
	RB_CLEAR_NODE(&cp->rb_node);
	cp->children = RB_ROOT;
}

static struct call_path_root *call_path_root__new(void)
{
	struct call_path_root *cpr;

	cpr = zalloc(sizeof(struct call_path_root));
	if (!cpr)
		return NULL;
	call_path__init(&cpr->call_path, NULL, NULL, 0, false);
	INIT_LIST_HEAD(&cpr->blocks);
	return cpr;
}

static void call_path_root__free(struct call_path_root *cpr)
{
	struct call_path_block *pos, *n;

	list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
		list_del(&pos->node);
		free(pos);
	}
	free(cpr);
}

static struct call_path *call_path__new(struct call_path_root *cpr,
					struct call_path *parent,
					struct symbol *sym, u64 ip,
					bool in_kernel)
{
	struct call_path_block *cpb;
	struct call_path *cp;
	size_t n;

	if (cpr->next < cpr->sz) {
		cpb = list_last_entry(&cpr->blocks, struct call_path_block,
				      node);
	} else {
		cpb = zalloc(sizeof(struct call_path_block));
		if (!cpb)
			return NULL;
		list_add_tail(&cpb->node, &cpr->blocks);
		cpr->sz += CALL_PATH_BLOCK_SIZE;
	}

	n = cpr->next++ & CALL_PATH_BLOCK_MASK;
	cp = &cpb->cp[n];

	call_path__init(cp, parent, sym, ip, in_kernel);

	return cp;
}
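
/*
 * Each call path keeps its children in an rb-tree keyed by (symbol, ip), so
 * an existing child is found, or a new one created, in logarithmic time.
 */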
static struct call_path *call_path__findnew(struct call_path_root *cpr,
					    struct call_path *parent,
					    struct symbol *sym, u64 ip, u64 ks)
{
	struct rb_node **p;
	struct rb_node *node_parent = NULL;
	struct call_path *cp;
	bool in_kernel = ip >= ks;

	if (sym)
		ip = 0;

	if (!parent)
		return call_path__new(cpr, parent, sym, ip, in_kernel);

	p = &parent->children.rb_node;
	while (*p != NULL) {
		node_parent = *p;
		cp = rb_entry(node_parent, struct call_path, rb_node);

		if (cp->sym == sym && cp->ip == ip)
			return cp;

		if (sym < cp->sym || (sym == cp->sym && ip < cp->ip))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	cp = call_path__new(cpr, parent, sym, ip, in_kernel);
	if (!cp)
		return NULL;

	rb_link_node(&cp->rb_node, node_parent, p);
	rb_insert_color(&cp->rb_node, &parent->children);

	return cp;
}

struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, void *data),
			   void *data)
{
	struct call_return_processor *crp;

	crp = zalloc(sizeof(struct call_return_processor));
	if (!crp)
		return NULL;
	crp->cpr = call_path_root__new();
	if (!crp->cpr)
		goto out_free;
	crp->process = process;
	crp->data = data;
	return crp;

out_free:
	free(crp);
	return NULL;
}

void call_return_processor__free(struct call_return_processor *crp)
{
	if (crp) {
		call_path_root__free(crp->cpr);
		free(crp);
	}
}

static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
				 u64 timestamp, u64 ref, struct call_path *cp,
				 bool no_call)
{
	struct thread_stack_entry *tse;
	int err;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err)
			return err;
	}

	tse = &ts->stack[ts->cnt++];
	tse->ret_addr = ret_addr;
	tse->timestamp = timestamp;
	tse->ref = ref;
	tse->branch_count = ts->branch_count;
	tse->cp = cp;
	tse->no_call = no_call;

	return 0;
}
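
/*
 * Pop entries until one matching the return address (or, for a lone bottom
 * entry, the returning symbol) is found. Entries skipped on the way down are
 * reported with the 'no return' flag set. Returns 1 if no match is found.
 */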
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
				u64 ret_addr, u64 timestamp, u64 ref,
				struct symbol *sym)
{
	int err;

	if (!ts->cnt)
		return 1;

	if (ts->cnt == 1) {
		struct thread_stack_entry *tse = &ts->stack[0];

		if (tse->cp->sym == sym)
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
	}

	if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
		return thread_stack__call_return(thread, ts, --ts->cnt,
						 timestamp, ref, false);
	} else {
		size_t i = ts->cnt - 1;

		while (i--) {
			if (ts->stack[i].ret_addr != ret_addr)
				continue;
			i += 1;
			while (ts->cnt > i) {
				err = thread_stack__call_return(thread, ts,
								--ts->cnt,
								timestamp, ref,
								true);
				if (err)
					return err;
			}
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
		}
	}

	return 1;
}

static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts,
				struct perf_sample *sample,
				struct addr_location *from_al,
				struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	struct symbol *sym;
	u64 ip;

	if (sample->ip) {
		ip = sample->ip;
		sym = from_al->sym;
	} else if (sample->addr) {
		ip = sample->addr;
		sym = to_al->sym;
	} else {
		return 0;
	}

	cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp,
				     true);
}
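
/*
 * Handle a 'return' that has no matching 'call': pop kernel entries when
 * returning to userspace, then push and immediately pop an entry for the
 * unmatched return so that it is still reported.
 */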
static int thread_stack__no_call_return(struct thread *thread,
					struct thread_stack *ts,
					struct perf_sample *sample,
					struct addr_location *from_al,
					struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp, *parent;
	u64 ks = ts->kernel_start;
	int err;

	if (sample->ip >= ks && sample->addr < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}

		/* If the stack is empty, push the userspace address */
		if (!ts->cnt) {
			cp = call_path__findnew(cpr, &cpr->call_path,
						to_al->sym, sample->addr,
						ts->kernel_start);
			if (!cp)
				return -ENOMEM;
			return thread_stack__push_cp(ts, 0, sample->time, ref,
						     cp, true);
		}
	} else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}
	}

	if (ts->cnt)
		parent = ts->stack[ts->cnt - 1].cp;
	else
		parent = &cpr->call_path;

	/* This 'return' had no 'call', so push and pop top of stack */
	cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
				    true);
	if (err)
		return err;

	return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
				    to_al->sym);
}

static int thread_stack__trace_begin(struct thread *thread,
				     struct thread_stack *ts, u64 timestamp,
				     u64 ref)
{
	struct thread_stack_entry *tse;
	int err;

	if (!ts->cnt)
		return 0;

	/* Pop trace end */
	tse = &ts->stack[ts->cnt - 1];
	if (tse->cp->sym == NULL && tse->cp->ip == 0) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						timestamp, ref, false);
		if (err)
			return err;
	}

	return 0;
}
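
/*
 * A 'trace end' is represented by pushing a call path with no symbol and zero
 * ip; thread_stack__trace_begin() pops it again when tracing resumes.
 */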
static int thread_stack__trace_end(struct thread_stack *ts,
				   struct perf_sample *sample, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	u64 ret_addr;

	/* No point having 'trace end' on the bottom of the stack */
	if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
		return 0;

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	ret_addr = sample->ip + sample->insn_len;

	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
				     false);
}
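
/*
 * Process one branch sample: create the thread stack on first use, flush it on
 * exec, seed an empty stack with the current symbol, then update the stack
 * according to the sample's call/return/trace-begin/trace-end flags, reporting
 * completed call/return pairs via the call_return_processor.
 */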
int thread_stack__process(struct thread *thread, struct comm *comm,
			  struct perf_sample *sample,
			  struct addr_location *from_al,
			  struct addr_location *to_al, u64 ref,
			  struct call_return_processor *crp)
{
	struct thread_stack *ts = thread->ts;
	int err = 0;

	if (ts) {
		if (!ts->crp) {
			/* Supersede thread_stack__event() */
			thread_stack__free(thread);
			thread->ts = thread_stack__new(thread, crp);
			if (!thread->ts)
				return -ENOMEM;
			ts = thread->ts;
			ts->comm = comm;
		}
	} else {
		thread->ts = thread_stack__new(thread, crp);
		if (!thread->ts)
			return -ENOMEM;
		ts = thread->ts;
		ts->comm = comm;
	}

	/* Flush stack on exec */
	if (ts->comm != comm && thread->pid_ == thread->tid) {
		err = thread_stack__flush(thread, ts);
		if (err)
			return err;
		ts->comm = comm;
	}

	/* If the stack is empty, put the current symbol on the stack */
	if (!ts->cnt) {
		err = thread_stack__bottom(thread, ts, sample, from_al, to_al,
					   ref);
		if (err)
			return err;
	}

	ts->branch_count += 1;
	ts->last_time = sample->time;

	if (sample->flags & PERF_IP_FLAG_CALL) {
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;
		u64 ret_addr;

		if (!sample->ip || !sample->addr)
			return 0;

		ret_addr = sample->ip + sample->insn_len;
		if (ret_addr == sample->addr)
			return 0; /* Zero-length calls are excluded */

		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		if (!cp)
			return -ENOMEM;
		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
					    cp, false);
	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
		if (!sample->ip || !sample->addr)
			return 0;

		err = thread_stack__pop_cp(thread, ts, sample->addr,
					   sample->time, ref, from_al->sym);
		if (err) {
			if (err < 0)
				return err;
			err = thread_stack__no_call_return(thread, ts, sample,
							   from_al, to_al, ref);
		}
	} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
		err = thread_stack__trace_begin(thread, ts, sample->time, ref);
	} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
		err = thread_stack__trace_end(ts, sample, ref);
	}

	return err;
}