/* machine.c */

#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"
#include "linux/hash.h"

static void machine__remove_thread(struct machine *machine, struct thread *th);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
}

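/*
 * Set up an empty machine: no threads, no dsos, no kernel maps yet.
 * For guest machines (pid != HOST_KERNEL_ID) a placeholder thread named
 * "[guest/<pid>]" is created so guest samples have somewhere to hang.
 */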
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->user_dsos);
	dsos__init(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;

	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	machine->current_tid = NULL;

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

static void dsos__delete(struct dsos *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		machine__remove_thread(machine, t);
	}
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	vdso__exit(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

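/*
 * A 'machines' instance groups the host machine together with an rbtree
 * of guest machines, keyed by pid.
 */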
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

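/*
 * A thread can be discovered from a sample before its pid is known; once
 * an event finally supplies the pid, adopt it and share the map groups of
 * the thread group leader.
 */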
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__delete(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}

static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		thread__zput(machine->last_match);
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = thread__get(th);
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase(&th->rb_node, &machine->threads);
			thread__delete(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = thread__get(th);
	}

	return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

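/*
 * Find or create the dso for a kernel module. Freshly added dsos get a
 * symtab_type recording whether this is a host or guest module, bumped by
 * one when the module file is compressed (_KMODULE_COMP follows _KMODULE
 * in the enum).
 */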
static struct dso *
machine__module_dso(struct machine *machine, struct kmod_path *m,
		    const char *filename)
{
	struct dso *dso;

	dso = dsos__find(&machine->kernel_dsos, m->name, true);
	if (!dso) {
		dso = dsos__addnew(&machine->kernel_dsos, m->name);
		if (dso == NULL)
			return NULL;

		if (machine__is_host(machine))
			dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
		else
			dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

		/* _KMODULE_COMP should be next to _KMODULE */
		if (m->kmod && m->comp)
			dso->symtab_type++;

		dso__set_short_name(dso, strdup(m->name), true);
		dso__set_long_name(dso, strdup(filename), true);
	}

	return dso;
}

struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map = NULL;
	struct dso *dso;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map)
		goto out;

	dso = machine__module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

out:
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
		     __dsos__fprintf(&machines->host.user_dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
		ret += __dsos__fprintf(&pos->user_dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
	       __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}

	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];

		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}

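/*
 * Create one kernel map per map type (functions, variables), all backed
 * by the same kernel dso and using the identity function for the
 * address<->IP mapping, then hook them into the machine's kmaps.
 */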
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

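/*
 * Read <root_dir>/proc/version (e.g. "Linux version 4.0.0 (user@host) ...")
 * and return just the version string as freshly allocated memory, or NULL
 * on failure.
 */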
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal that the kmod is compressed, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);

	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

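/*
 * Handle an MMAP event for kernel space: module mappings (filenames that
 * start with '/' or a non-kernel '[') become module maps, while the main
 * kernel mapping ("[kernel.kallsyms]" and friends) (re)creates the vmlinux
 * maps and fixes up their length and relocation reference symbol.
 */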
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
			if (is_kernel_module(dso->long_name))
				continue;

			kernel = dso;
			break;
		}

		if (kernel == NULL)
			kernel = __dsos__findnew(&machine->kernel_dsos,
						 kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void machine__remove_thread(struct machine *machine, struct thread *th)
{
	if (machine->last_match == th)
		thread__zput(machine->last_match);

	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	thread__put(th);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return true;
	return false;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static int add_callchain_ip(struct thread *thread,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (sort__has_parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(&callchain_cursor);
		}
	}

	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}

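/*
 * Branch-stack loop removal: hash each branch source address into a small
 * table; when the same source shows up again, compare the candidate loop
 * body and, if the two iterations match, collapse them into one.
 */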
#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}

/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success: got LBR callchain information
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	u64 ip;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted callchain. skipping...\n");
			return 0;
		}

		for (j = 0; j < mix_chain_nr; j++) {
			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1)
					ip = lbr_stack->entries[j - i - 2].from;
				else
					ip = lbr_stack->entries[0].to;
			} else {
				if (j < lbr_nr)
					ip = lbr_stack->entries[lbr_nr - j - 1].from;
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else
					ip = lbr_stack->entries[0].to;
			}

			err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

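/*
 * Walk a sampled callchain into the global callchain_cursor. LBR call
 * stacks are tried first; otherwise branch-stack entries are merged in
 * (with loops removed) before the regular frame-pointer chain is added.
 */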
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	int skip_idx = -1;
	int first_call = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	if (chain->nr < PERF_MAX_STACK_DEPTH)
		skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */
	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr = remove_loops(be, nr);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, parent, root_al,
					       NULL, be[i].to);
			if (!err)
				err = add_callchain_ip(thread, parent, root_al,
						       NULL, be[i].from);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = first_call; i < chain_nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

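/*
 * Resolve the full callchain for a sample, falling back to DWARF
 * post-unwinding when the sample carries user register and stack data.
 */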
int thread__resolve_callchain(struct thread *thread,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = thread__resolve_callchain_sample(thread, evsel,
						   sample, parent,
						   root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor,
				   thread, sample, max_stack);
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map, machine->symbol_filter);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}