machine.c

#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"
#include "linux/hash.h"

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
}

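/*
 * Initialize a machine: empty DSO lists and thread tree, a private copy of
 * root_dir and, for guests (pid != HOST_KERNEL_ID), a placeholder thread
 * whose comm is "[guest/<pid>]" so that guest samples resolve to something
 * readable.
 */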
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->user_dsos);
	dsos__init(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;

	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	machine->current_tid = NULL;

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

static void dsos__delete(struct dsos *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		machine__remove_thread(machine, t);
	}
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	vdso__exit(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

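/*
 * Look up a guest machine by pid. Falls back to the default guest machine
 * (the one whose pid is 0, i.e. DEFAULT_GUEST_KERNEL_ID) when no exact
 * match is found.
 */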
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}

	return;
}

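/*
 * A thread can be created before its pid is known (pid == -1). Once the
 * real pid arrives, record it and share the map groups of the thread group
 * leader, since all threads of a process see the same address space.
 */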
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there should never be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__delete(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}

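/*
 * Core thread lookup: check the last_match cache first, then walk the tid
 * rbtree. When 'create' is true and no thread exists, allocate one, insert
 * it and initialize its map groups.
 */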
static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		thread__zput(machine->last_match);
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = thread__get(th);
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would corrupt the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase(&th->rb_node, &machine->threads);
			thread__delete(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = thread__get(th);
	}

	return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

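/*
 * Find the DSO for a kernel module by its short name, creating it on first
 * sight. New entries get a symtab_type matching host vs. guest and, for
 * compressed modules, the adjacent _COMP variant.
 */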
static struct dso*
machine__module_dso(struct machine *machine, struct kmod_path *m,
		    const char *filename)
{
	struct dso *dso;

	dso = dsos__find(&machine->kernel_dsos, m->name, true);
	if (!dso) {
		dso = dsos__addnew(&machine->kernel_dsos, m->name);
		if (dso == NULL)
			return NULL;

		if (machine__is_host(machine))
			dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
		else
			dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

		/* _KMODULE_COMP should be next to _KMODULE */
		if (m->kmod && m->comp)
			dso->symtab_type++;

		dso__set_short_name(dso, strdup(m->name), true);
		dso__set_long_name(dso, strdup(filename), true);
	}

	return dso;
}

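/*
 * Find or create the map for a kernel module: reuse an existing map of the
 * same name in kmaps, otherwise build a new map at 'start' on top of the
 * module's DSO and insert it.
 */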
struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map = NULL;
	struct dso *dso;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map)
		goto out;

	dso = machine__module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

out:
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
		     __dsos__fprintf(&machines->host.user_dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
		ret += __dsos__fprintf(&pos->user_dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
	       __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}

	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];

		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/*
 * Figure out the start address of the kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}

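/*
 * Create one kernel (vmlinux) map per map type, all starting at the address
 * found in kallsyms and using identity ip->address mapping, and hook them
 * into the machine's kmaps.
 */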
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

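/*
 * If a map exists for this module name, remember the full on-disk path as
 * the DSO's long name and pick up the build id from the file.
 */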
static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

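/*
 * Derive the module directory from the running kernel version in
 * <root_dir>/proc/version, then walk <root_dir>/lib/modules/<version> to
 * attach full on-disk paths to the module DSOs.
 */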
static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);

	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end   = (event->mmap.start +
						   event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data files come with
		 * a zero-sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

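/*
 * Handle an MMAP event in kernel space: filenames starting with '/' (or
 * with '[' but without the kernel mmap prefix) are treated as modules; an
 * event matching the "[kernel.kallsyms]"-style prefix (re)creates the
 * kernel maps and sets up the ref reloc symbol from the event's pgoff.
 */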
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
			if (is_kernel_module(dso->long_name))
				continue;

			kernel = dso;
			break;
		}

		if (kernel == NULL)
			kernel = __dsos__findnew(&machine->kernel_dsos,
						 kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non-zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	if (machine->last_match == th)
		thread__zput(machine->last_match);

	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * Move it first to the dead_threads list, then drop the reference;
	 * if this is the last reference, the thread__delete destructor will
	 * be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	thread__put(th);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match,
	 * or else the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

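/*
 * Append one ip to the callchain cursor. PERF_CONTEXT_* marker entries only
 * switch the current cpumode and add nothing; a NULL cpumode asks for the
 * address to be resolved by trying each cpumode in turn.
 */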
static int add_callchain_ip(struct thread *thread,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (sort__has_parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(&callchain_cursor);
		}
	}

	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/*
 * Remove loops: hash each branch 'from' address; when the same address
 * shows up again, check whether the entries since its previous occurrence
 * repeat exactly and, if so, collapse the duplicate run.
 */
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}

/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success: got LBR callchain information
 * 0 when no LBR callchain information is available, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	u64 ip;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted callchain. skipping...\n");
			return 0;
		}

		for (j = 0; j < mix_chain_nr; j++) {
			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1)
					ip = lbr_stack->entries[j - i - 2].from;
				else
					ip = lbr_stack->entries[0].to;
			} else {
				if (j < lbr_nr)
					ip = lbr_stack->entries[lbr_nr - j - 1].from;
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else
					ip = lbr_stack->entries[0].to;
			}

			err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

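/*
 * Resolve the callchain for one sample: try the LBR callstack first if the
 * evsel recorded one, then merge any sampled branch stack into the stack
 * trace, and finally walk the remaining fp callchain entries.
 */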
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	int skip_idx = -1;
	int first_call = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	if (chain->nr < PERF_MAX_STACK_DEPTH)
		skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */
	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr = remove_loops(be, nr);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, parent, root_al,
					       NULL, be[i].to);
			if (!err)
				err = add_callchain_ip(thread, parent, root_al,
						       NULL, be[i].from);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = first_call; i < chain_nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;

	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int thread__resolve_callchain(struct thread *thread,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = thread__resolve_callchain_sample(thread, evsel,
						   sample, parent,
						   root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor,
				   thread, sample, max_stack);
}

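/*
 * Run 'fn' over every thread of the machine, live ones first (the tid
 * rbtree) and then the dead_threads list, stopping at the first non-zero
 * return value.
 */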
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	return rc;
}

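/*
 * Synthesize the pre-existing threads: from the thread map when the target
 * names specific tasks, from all system threads for a cpu-wide target, and
 * nothing when only a command is specified.
 */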
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}

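/*
 * Per-cpu current-tid bookkeeping. The array is allocated lazily by
 * machine__set_current_tid() and initialized to -1 for cpus not yet seen.
 */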
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map, machine->symbol_filter);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}