machine.c

#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps);
	RB_CLEAR_NODE(&machine->rb_node);
	INIT_LIST_HEAD(&machine->user_dsos);
	INIT_LIST_HEAD(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;
	machine->vdso_info = NULL;

	machine->kmaps.machine = machine;
	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;
	machine->comm_exec = false;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	machine->current_tid = NULL;

	return 0;
}
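
/*
 * Allocate and initialize the machine that represents the host, including
 * its kernel maps.  Returns NULL if allocation or kernel map creation fails.
 */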
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

static void dsos__delete(struct list_head *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, dsos, node) {
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &machine->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	vdso__exit(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}
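
/*
 * Find the guest machine registered for @pid, or the host machine for
 * HOST_KERNEL_ID.  The machine with pid 0 acts as the default guest and is
 * returned when no exact match exists.
 */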
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);
			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}

	return;
}
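
/*
 * Fill in a thread's pid once it becomes known and make the thread share
 * its group leader's map groups.
 */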
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new();

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid.  Consequently there never should be any maps on a thread
		 * with an unknown pid.  Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__delete(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}
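
/*
 * Look up the thread with the given tid in the machine's thread rbtree,
 * optionally creating it.  A one-entry "last_match" cache short-circuits
 * the tree walk for repeated lookups of the same tid.
 */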
static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th && th->tid == tid) {
		machine__update_thread_pid(machine, th, pid);
		return th;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;

		/*
		 * We have to initialize map_groups separately
		 * after the rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader, and that would corrupt the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			thread__delete(th);
			return NULL;
		}
	}

	return th;
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}
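
/*
 * Find or create the DSO for a kernel module and map it at @start in the
 * machine's kernel map groups.
 */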
struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map;
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);

	if (dso == NULL)
		return NULL;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		return NULL;

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
	map_groups__insert(&machine->kmaps, map);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
		     __dsos__fprintf(&machines->host.user_dsos, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += __dsos__fprintf(&pos->kernel_dsos, fp);
		ret += __dsos__fprintf(&pos->user_dsos, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
	       __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}

	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];

		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
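
/*
 * Pick the DSO that represents the kernel image: vmlinux (or kallsyms) for
 * the host, the guest vmlinux or the per-guest kallsyms name otherwise, and
 * read the build-id of the running kernel if it is not already set.
 */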
static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/*
 * Figure out the start address of the kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name.  Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_kernel_start_addr(struct machine *machine,
					  const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}
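
/*
 * Create one vmlinux map per map type, all starting at the kernel start
 * address, and insert them into the machine's kernel map groups.
 */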
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_kernel_start_addr(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}
int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sessions for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}
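
/*
 * Read <root_dir>/proc/version and return a strdup()ed copy of the kernel
 * release string, or NULL on failure.
 */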
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}
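
/*
 * Recursively walk the modules directory and, for every .ko file that
 * matches a module map by name, record its full path as the DSO's long
 * name and pick up its build-id.
 */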
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			char *dot = strrchr(dent->d_name, '.'),
			     dso_name[PATH_MAX];
			struct map *map;
			char *long_name;

			if (dot == NULL || strcmp(dot, ".ko"))
				continue;
			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,
						       dso_name);
			if (map == NULL)
				continue;

			long_name = strdup(path);
			if (long_name == NULL) {
				ret = -1;
				goto out;
			}

			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
		}
	}

out:
	closedir(dir);
	return ret;
}
static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}
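
/*
 * Set up the kernel maps for this machine: find the kernel DSO, create the
 * vmlinux maps, load module maps, fix up map end addresses and record the
 * kallsyms ref reloc symbol used for relocation.
 */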
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_kernel_start_addr(machine, &name);

	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);

		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}
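
/*
 * Handle MMAP events recorded with a kernel cpumode: module mappings get a
 * module DSO and map, while the kernel mapping itself (re)creates the
 * vmlinux maps, sets their length and records the ref reloc symbol from the
 * event's pgoff.
 */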
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void machine__remove_thread(struct machine *machine, struct thread *th)
{
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}
static void ip__resolve_ams(struct machine *machine, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct machine *machine, struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
				   &al);
	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->machine, al->thread, al->cpumode,
			 &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}
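
/*
 * Walk the sampled callchain, switching the cpumode whenever a
 * PERF_CONTEXT_* marker is seen, resolve each ip to a map/symbol and append
 * it to the global callchain cursor.  Corrupted chains reset the cursor and
 * are silently dropped.
 */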
static int machine__resolve_callchain_sample(struct machine *machine,
					     struct thread *thread,
					     struct ip_callchain *chain,
					     struct symbol **parent,
					     struct addr_location *root_al,
					     int max_stack)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	int chain_nr = min(max_stack, (int)chain->nr);
	int i;
	int j;
	int err;
	int skip_idx __maybe_unused;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(machine, thread, chain);

	for (i = 0; i < chain_nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = 0;
		thread__find_addr_location(thread, machine, cpumode,
					   MAP__FUNCTION, ip, &al);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_regex(al.sym, &parent_regex))
				*parent = al.sym;
			else if (have_ignore_callees && root_al &&
				 symbol__match_regex(al.sym, &ignore_callees_regex)) {
				/* Treat this symbol as the root,
				   forgetting its callees. */
				*root_al = al;
				callchain_cursor_reset(&callchain_cursor);
			}
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;

	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int machine__resolve_callchain(struct machine *machine,
			       struct perf_evsel *evsel,
			       struct thread *thread,
			       struct perf_sample *sample,
			       struct symbol **parent,
			       struct addr_location *root_al,
			       int max_stack)
{
	int ret;

	ret = machine__resolve_callchain_sample(machine, thread,
						sample->callchain, parent,
						root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
				   thread, sample, max_stack);
}
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}
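
/*
 * Record which tid is currently running on @cpu, lazily allocating the
 * per-cpu current_tid array, and bind the corresponding thread to that cpu.
 */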
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;

	return 0;
}