// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "sane_ctype.h"
#include <symbol/kallsyms.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		threads->entries = RB_ROOT;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}

static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}

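/*
 * Initialize a machine: zero the struct, set up its kernel maps, dsos and
 * hashed thread tables, and, for guest machines, create the "[guest/<pid>]"
 * placeholder thread. On error the root_dir and mmap_name allocations are
 * released before returning.
 */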
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}

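/*
 * Allocate and initialize the machine that represents the host, including
 * its kernel maps. Returns NULL on allocation or kernel map setup failure.
 */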
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		if (machine__init(machine, "", HOST_KERNEL_ID) < 0)
			goto out_delete;

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_write(&threads->lock);
		nd = rb_first(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

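/*
 * Allocate a guest machine for the given pid and link it into the guests
 * rbtree, which is kept sorted by pid.
 */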
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

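/*
 * Look up a machine by pid. The host machine is keyed by HOST_KERNEL_ID;
 * if no guest matches exactly, fall back to the default guest machine
 * (pid == 0), when one exists.
 */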
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

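/*
 * Fill in a thread's pid once it becomes known, and share the thread group
 * leader's map groups with it. Threads created from samples may start out
 * with an unknown (-1) pid.
 */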
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there should never be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads->entries);

		/*
		 * We have to initialize map_groups separately after the rb
		 * tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_map_groups to find the thread leader, and that
		 * would corrupt the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads->last_match = th;
		++threads->nr;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

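/*
 * Handle PERF_RECORD_COMM: attach the new command name to the thread,
 * noting whether it came from an exec, so that later samples resolve to
 * the right comm.
 */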
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than the"
		  " perf tool.\nTry updating the perf tool...\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel...\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

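/*
 * Find the dso for a kernel module by its short name, creating it if it is
 * not there yet. The returned dso has its reference count bumped; the
 * caller must drop it with dso__put().
 */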
static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}

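/*
 * Find or create the map for a kernel module given its mmap filename and
 * load address. The returned map stays owned by the machine's kmaps; no
 * extra reference is handed to the caller.
 */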
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];

		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret += fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/*
 * Figure out the start address of the kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name, u64 *start)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;
	return 0;
}

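/*
 * Create one vmlinux map per map type, all backed by the kernel dso, and
 * insert them into the machine's kmaps. Any previously created kernel maps
 * are destroyed first.
 */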
static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;

	/* If the kernel maps are being recreated, destroy the previous ones */
	machine__destroy_kernel_maps(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(0, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int __machine__load_kallsyms(struct machine *machine, const char *filename,
			     enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	char *long_name;
	struct map *map = map_groups__find_by_name(mg, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal that the kmod is compressed, so
	 * update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = start;
		machine->vmlinux_maps[i]->end = end;

		/*
		 * Be a bit paranoid here, some perf.data files come with
		 * a zero-sized synthesized MMAP event for the kernel.
		 */
		if (start == 0 && end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

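/*
 * Build the machine's kernel maps: create the vmlinux maps, load module
 * maps when requested, and, if the running kernel's start address can be
 * determined from kallsyms, re-insert the kernel map at its real address.
 */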
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 addr = 0;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	dso__put(kernel);
	if (ret < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
		if (name &&
		    maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
			machine__destroy_kernel_maps(machine);
			return -1;
		}

		/* we have a real start address now, so re-order the kmaps */
		map = machine__kernel_map(machine);

		map__get(map);
		map_groups__remove(&machine->kmaps, map);

		/* assume it's the last in the kmaps */
		machine__set_kernel_mmap(machine, addr, ~0ULL);

		map_groups__insert(&machine->kmaps, map);
		map__put(map);
	}

	/* update end address of the kernel map using adjacent module address */
	map = map__next(machine__kernel_map(machine));
	if (map)
		machine__set_kernel_mmap(machine, addr, map->start);

	return 0;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

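/*
 * Handle MMAP events for kernel space: module mmaps become module maps,
 * while the main kernel mmap (re)creates the vmlinux maps and relocates
 * the ref reloc symbol using the address recorded in the event's pgoff.
 */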
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when we add this dso to the
			 * linked list.
			 *
			 * However, we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto the
			 * kernel_dsos list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */
			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap(machine, event->mmap.start,
					 event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

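/*
 * Unlink a thread from the machine: drop it from the rbtree and the
 * last_match cache, park it on the dead list and drop the rbtree's
 * reference. The "lock" argument tells whether to take threads->lock here;
 * callers that already hold it pass false.
 */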
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads->last_match = NULL;

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		down_write(&threads->lock);
	rb_erase_init(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);
	if (lock)
		up_write(&threads->lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	__machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent != NULL && parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	return regexec(regex, sym->name, 0, NULL, 0) == 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = 0;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	__thread__find_symbol(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_symbol(thread, m, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = phys_addr;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
{
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}
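
/*
 * Loop information attached to branch stack entries by remove_loops()
 * via save_iterations(): a loop iteration count and the cycles the
 * collapsed entries accounted for.
 */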
struct iterations {
	int nr_loop_iter;
	u64 cycles;
};
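
/*
 * Append one resolved address to the callchain cursor. PERF_CONTEXT_*
 * values in the chain are not real addresses: they only switch the
 * cpumode used to resolve the addresses that follow, and are skipped.
 */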
static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	srcline = callchain_srcline(al.map, al.sym, al.addr);
	return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}
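
/*
 * Resolve each from/to address of the sampled branch stack into
 * branch_info entries, carrying the recorded branch flags along.
 */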
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}
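
/*
 * Record a collapsed loop at this entry: 'nr' as the iteration count
 * and the sum of the cycles the collapsed branch entries carried.
 */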
static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter = nr;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
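/*
 * The scheme: hash each entry's 'from' address into a small table. When
 * the same 'from' reappears, compare the entries since its first
 * occurrence against those that follow; if they match, the duplicated
 * occurrence of the loop body is squeezed out of 'l' (and 'iter'),
 * after save_iterations() records its length and cycle sum. Hash
 * collisions with a different address are simply not handled.
 */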
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}

/*
 * Resolve an LBR callstack chain sample.
 * Return:
 * 1 on success: got LBR callchain information
 * 0 when no LBR callchain information is available; the caller should
 *   fall back to the fp-based chain
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip, branch_from = 0;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * The LBR callstack can only capture the user call chain.
		 * mix_chain_nr is the kernel call chain length plus the LBR
		 * user call chain length: i kernel entries, 1 for the
		 * PERF_CONTEXT_USER marker, and lbr_nr + 1 user entries.
		 * For details, please refer to the comments in
		 * callchain__printf.
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;
			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			} else {
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, NULL,
					       branch_from);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}
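
/*
 * Resolve a callchain from the sample data itself: the LBR callstack
 * when the evsel recorded one, optionally the branch stack folded into
 * the chain for extra context, and finally the regular ip chain.
 */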
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;

	if (chain)
		chain_nr = chain->nr;

	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */
	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];
		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}
	return 0;
}
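
/*
 * Expand inlined frames for 'ip' using the DSO's (lazily parsed and
 * cached) inline information. Returns 0 once inline entries have been
 * appended; nonzero means the caller should append the plain entry.
 */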
static int append_inlines(struct callchain_cursor *cursor,
			  struct map *map, struct symbol *sym, u64 ip)
{
	struct inline_node *inline_node;
	struct inline_list *ilist;
	u64 addr;
	int ret = 1;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__rip_2objdump(map, ip);

	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
	}

	list_for_each_entry(ilist, &inline_node->val, list) {
		ret = callchain_cursor_append(cursor, ip, map,
					      ilist->symbol, false,
					      NULL, 0, 0, 0, ilist->srcline);

		if (ret != 0)
			return ret;
	}

	return ret;
}
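
/*
 * Callback for unwind__get_entries(): append one DWARF-unwound frame,
 * preferring its inline expansion when one is available.
 */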
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;

	if (symbol_conf.hide_unresolved && entry->sym == NULL)
		return 0;

	if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
		return 0;

	srcline = callchain_srcline(entry->map, entry->sym, entry->ip);
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym,
				       false, NULL, 0, 0, 0, srcline);
}

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}
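
/*
 * Resolve the full callchain for a sample: the sampled chain and the
 * DWARF post-unwind entries are combined, in an order that depends on
 * callchain_param.order.
 */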
int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}
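
/*
 * Call 'fn' on every thread of the machine, live and dead, in every
 * hash bucket; the walk stops at (and returns) the first nonzero
 * return value.
 */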
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct threads *threads;
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		threads = &machine->threads[i];
		for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
			thread = rb_entry(nd, struct thread, rb_node);
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}

		list_for_each_entry(thread, &threads->dead, node) {
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}
	}
	return rc;
}

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}
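
/*
 * Synthesize records for already-running threads: from the given thread
 * map when the target names tasks, system-wide (from /proc) when it
 * names CPUs, and nothing when a command is to be started, since its
 * events will be generated as it runs.
 */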
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int proc_map_timeout,
				  unsigned int nr_threads_synthesize)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process,
						      machine, data_mmap,
						      proc_map_timeout,
						      nr_threads_synthesize);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}
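
/*
 * Note which tid is currently running on 'cpu'. The per-CPU current_tid
 * array is allocated lazily and initialized to -1 (unknown); the thread
 * itself is also stamped with the CPU.
 */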
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		if (!err)
			machine->kernel_start = map->start;
	}
	return err;
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}
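
/*
 * Resolve a kernel address: returns the symbol name, sets *modp to the
 * module's short name (NULL for the core kernel) and rewrites *addrp to
 * the symbol's unmapped start address.
 */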
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}