machine.c

// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "sane_ctype.h"
#include <symbol/kallsyms.h>
#include <linux/mman.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		threads->entries = RB_ROOT;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}
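
/*
 * Pick the name used for this machine's kernel mmap events:
 * "[kernel.kallsyms]" for the host, "[guest.kernel.kallsyms]" for the
 * default guest, and a per-pid "[guest.kernel.kallsyms.<pid>]" otherwise.
 */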
static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}
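
/*
 * Initialize a zeroed struct machine: kernel maps, dsos, the thread tables
 * and the mmap name. For guests (pid != HOST_KERNEL_ID) also create a
 * "[guest/<pid>]" pseudo thread. On failure the root_dir and mmap_name
 * allocations are released again.
 */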
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;

	err = 0;
out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}
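
/*
 * Empty the dso list: detach each dso from the list and rbtree bookkeeping
 * and drop the list's reference, leaving any remaining owners holding the
 * last references.
 */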
static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_write(&threads->lock);
		nd = rb_first(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}
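
/*
 * When a thread was first seen with an unknown pid, fill the pid in once it
 * becomes known and share the thread leader's map groups, since all threads
 * of a process share the same address space.
 */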
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}
/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full rbtree:
 */
static struct thread *
__threads__get_last_match(struct threads *threads, struct machine *machine,
			  int pid, int tid)
{
	struct thread *th;

	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	return NULL;
}

static struct thread *
threads__get_last_match(struct threads *threads, struct machine *machine,
			int pid, int tid)
{
	struct thread *th = NULL;

	if (perf_singlethreaded)
		th = __threads__get_last_match(threads, machine, pid, tid);

	return th;
}

static void
__threads__set_last_match(struct threads *threads, struct thread *th)
{
	threads->last_match = th;
}

static void
threads__set_last_match(struct threads *threads, struct thread *th)
{
	if (perf_singlethreaded)
		__threads__set_last_match(threads, th);
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	th = threads__get_last_match(threads, machine, pid, tid);
	if (th)
		return th;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads__set_last_match(threads, th);
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads->entries);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads__set_last_match(threads, th);
		++threads->nr;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}
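
/*
 * Look up the dso for a kernel module by its short name, creating and
 * initializing it on first sight. Returns the dso with a reference held.
 */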
static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}
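
/*
 * Find the map for a kernel module by name, or create a new map (and dso)
 * for it starting at 'start'. Also fixes up the dso long name when a module
 * first seen "offline" (long name like "[module]") shows up with a real
 * file path.
 */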
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];

		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		/* accumulate, don't overwrite, across the hash buckets */
		ret += fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}
	return ret;
}
static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

void machine__get_kallsyms_filename(struct machine *machine, char *buf,
				    size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name, u64 *start)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;
	return 0;
}

int machine__create_extra_kernel_map(struct machine *machine,
				     struct dso *kernel,
				     struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -1;

	map->end   = xm->end;
	map->pgoff = xm->pgoff;

	kmap = map__kmap(map);

	kmap->kmaps = &machine->kmaps;
	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	map_groups__insert(&machine->kmaps, map);

	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
		  kmap->name, map->start, map->end);

	map__put(map);

	return 0;
}

static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}

/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000

/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps;
	int nr_cpus_avail, cpu;
	bool found = false;
	struct map *map;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	for (map = maps__first(maps); map; map = map__next(map)) {
		struct kmap *kmap = __map__kmap(map);
		struct map *dest_map;

		if (!kmap || !is_entry_trampoline(kmap->name))
			continue;

		dest_map = map_groups__find(kmaps, map->pgoff);
		if (dest_map != map)
			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
		found = true;
	}
	if (found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end   = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}

int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}
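
/*
 * (Re)create the base vmlinux map for this machine's kernel dso and hook it
 * into the kernel map groups. Any previously created kernel maps are
 * destroyed first.
 */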
static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	struct kmap *kmap;
	struct map *map;

	/* If the kernel map is being renewed, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -1;

	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;

	map = machine__kernel_map(machine);
	kmap = map__kmap(map);
	if (!kmap)
		return -1;

	kmap->kmaps = &machine->kmaps;
	map_groups__insert(&machine->kmaps, map);

	return 0;
}
void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	map_groups__remove(&machine->kmaps, map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		map_groups__fixup_end(&machine->kmaps);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso);

	return ret;
}
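
/*
 * Read "<root_dir>/proc/version" and return a strdup()ed kernel release
 * string (the word after "Linux version "), or NULL on failure.
 */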
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	char *long_name;
	struct map *map = map_groups__find_by_name(mg, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso)) {
		map->dso->symtab_type++;
		map->dso->comp = m->comp;
	}

	return 0;
}
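
/*
 * Recursively walk /lib/modules/<version>, matching each kernel module file
 * found on disk to an already known module map so its dso gets a real path.
 * The depth argument is used to skip the top-level "source" and "build"
 * symlinks.
 */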
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}
static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	machine->vmlinux_map->start = start;
	machine->vmlinux_map->end   = end;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		machine->vmlinux_map->end = ~0ULL;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 addr = 0;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, addr)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/* we have a real start address now, so re-order the kmaps */
		map = machine__kernel_map(machine);

		map__get(map);
		map_groups__remove(&machine->kmaps, map);

		/* assume it's the last in the kmaps */
		machine__set_kernel_mmap(machine, addr, ~0ULL);

		map_groups__insert(&machine->kmaps, map);
		map__put(map);
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	/* update end address of the kernel map using adjacent module address */
	map = map__next(machine__kernel_map(machine));
	if (map)
		machine__set_kernel_mmap(machine, addr, map->start);
out_put:
	dso__put(kernel);
	return ret;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     union perf_event *event)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(event->mmap.filename);
}

static int machine__process_extra_kernel_map(struct machine *machine,
					     union perf_event *event)
{
	struct map *kernel_map = machine__kernel_map(machine);
	struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
	struct extra_kernel_map xm = {
		.start = event->mmap.start,
		.end   = event->mmap.start + event->mmap.len,
		.pgoff = event->mmap.pgoff,
	};

	if (kernel == NULL)
		return -1;

	strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);

	return machine__create_extra_kernel_map(machine, kernel, &xm);
}
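
/*
 * Handle a kernel-space MMAP event: module maps (filename is a path or a
 * bracketed module name), the main kernel map (filename matches mmap_name),
 * or extra maps such as the x86_64 entry trampolines.
 */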
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when we add this dso to the
			 * linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto the
			 * kernel_dsos list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */
			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap(machine, event->mmap.start,
					 event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
		return machine__process_extra_kernel_map(machine, event);
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	u32 prot = 0;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
		prot = PROT_EXEC;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       0, 0, 0, 0, prot, 0,
		       event->mmap.filename,
		       thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
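
/*
 * Unlink a thread from the machine's rbtree and move it to the dead list;
 * the final thread__put() may then free it. 'lock' says whether the caller
 * already holds the bucket's threads->lock.
 */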
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads__set_last_match(threads, NULL);

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		down_write(&threads->lock);
	rb_erase_init(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);
	if (lock)
		up_write(&threads->lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}

	thread__put(thread);
	thread__put(parent);

	return err;
}
int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
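/*
 * Editor's usage sketch (hypothetical caller, not part of this file):
 * a consumer that has already parsed the record's sample data would
 * route every record through the dispatcher above:
 *
 *	if (machine__process_event(machine, event, &sample) < 0)
 *		pr_debug("unhandled or failed event type %u\n",
 *			 event->header.type);
 */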
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}
static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else the symbol remains unknown.
	 */
	thread__find_cpumode_addr_location(thread, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = 0;
}
static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_symbol(thread, m, addr, &al);

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = phys_addr;
}
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}
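/*
 * Editor's sketch (hypothetical caller): a mem_info pairs the sampled
 * instruction address (iaddr) with the data address it accessed
 * (daddr), each resolved to a map/symbol. A consumer might do:
 *
 *	struct mem_info *mi = sample__resolve_mem(sample, &al);
 *	if (mi != NULL) {
 *		... use mi->iaddr.sym, mi->daddr.sym, mi->data_src ...
 *		mem_info__put(mi);	(assuming the refcounted release)
 *	}
 */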
static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
{
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}
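/*
 * Editor's note: callchain_srcline() memoizes lookups in the DSO's
 * srclines tree keyed by ip, so repeated frames skip the expensive
 * get_srcline() (addr2line-style) step; with CCKEY_FUNCTION the
 * source line is irrelevant to the callchain key and NULL is
 * returned immediately.
 */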
struct iterations {
	int nr_loop_iter;
	u64 cycles;
};
static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	srcline = callchain_srcline(al.map, al.sym, al.addr);
	return callchain_cursor_append(cursor, ip, al.map, al.sym,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}
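/*
 * Editor's note: ips at or above PERF_CONTEXT_MAX are not addresses
 * but context markers the kernel embeds in the chain, e.g.
 * (illustrative):
 *
 *	PERF_CONTEXT_KERNEL, k0, k1, PERF_CONTEXT_USER, u0, u1, ...
 *
 * add_callchain_ip() consumes a marker by updating *cpumode and
 * returning 0 without appending a cursor entry; only real addresses
 * reach callchain_cursor_append().
 */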
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}
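/*
 * Editor's note: both branch endpoints go through ip__resolve_ams()
 * because a single branch may cross the user/kernel boundary, so the
 * sample's cpumode cannot be trusted for either side. The caller
 * owns the calloc'd array, e.g. (illustrative):
 *
 *	struct branch_info *bi = sample__resolve_bstack(sample, al);
 *	if (bi) {
 *		... consume bi[0..bs->nr - 1] ...
 *		free(bi);
 *	}
 */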
static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter = nr;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}
#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}
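/*
 * Editor's worked example (illustrative): for 'from' addresses
 *
 *	A B C A B C D
 *
 * the second A hashes to the slot already holding the first A, the
 * window A B C matches the entries that follow, so one copy of the
 * loop body is folded away (off = 3), leaving A B C D; iter[] at the
 * fold point records the number of collapsed entries and their
 * summed cycle counts via save_iterations().
 */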
/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success: LBR callchain information was obtained
 * 0 when no LBR callchain information is available; the caller should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip, branch_from = 0;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;
			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			} else {
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				}
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, NULL,
					       branch_from);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}
	return 0;
}
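/*
 * Editor's example of the mixed chain built above (ORDER_CALLEE,
 * illustrative): with i kernel entries the cursor is fed
 *
 *	ips[0] .. ips[i-1], PERF_CONTEXT_USER,
 *	lbr[0].to, lbr[0].from, lbr[1].from, ..., lbr[lbr_nr-1].from
 *
 * where the context marker only flips cpumode: kernel frames first,
 * then the LBR top-of-stack target, then each record's 'from' side
 * as the user call chain.
 */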
static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
			     struct callchain_cursor *cursor,
			     struct symbol **parent,
			     struct addr_location *root_al,
			     u8 *cpumode, int ent)
{
	int err = 0;

	while (--ent >= 0) {
		u64 ip = chain->ips[ent];

		if (ip >= PERF_CONTEXT_MAX) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, cpumode, ip,
					       false, NULL, NULL, 0);
			break;
		}
	}
	return err;
}
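/*
 * Editor's note: with caller-first ordering the chain is consumed
 * from the root end, so the context marker governing entry 'ent' is
 * the nearest marker at a lower index. find_prev_cpumode() scans
 * backwards until it finds one and lets add_callchain_ip() update
 * *cpumode from it.
 */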
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;

	if (chain)
		chain_nr = chain->nr;

	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	if (callchain_param.order != ORDER_CALLEE) {
		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
					&cpumode, chain->nr - first_call);
		if (err)
			return (err < 0) ? err : 0;
	}
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];
		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;
		else if (callchain_param.order != ORDER_CALLEE) {
			err = find_prev_cpumode(chain, thread, cursor, parent,
						root_al, &cpumode, j);
			if (err)
				return (err < 0) ? err : 0;
			continue;
		}

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}
static int append_inlines(struct callchain_cursor *cursor,
			  struct map *map, struct symbol *sym, u64 ip)
{
	struct inline_node *inline_node;
	struct inline_list *ilist;
	u64 addr;
	int ret = 1;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__map_ip(map, ip);
	addr = map__rip_2objdump(map, addr);

	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
	}

	list_for_each_entry(ilist, &inline_node->val, list) {
		ret = callchain_cursor_append(cursor, ip, map,
					      ilist->symbol, false,
					      NULL, 0, 0, 0, ilist->srcline);

		if (ret != 0)
			return ret;
	}

	return ret;
}
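/*
 * Editor's note: append_inlines() returns 0 only when inline frames
 * were actually appended (the final callchain_cursor_append()
 * succeeded), which is what lets unwind_entry() below return early
 * instead of adding the plain symbol a second time. Parsed inline
 * nodes are cached per DSO in inlined_nodes, mirroring the srcline
 * cache.
 */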
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;
	u64 addr = entry->ip;

	if (symbol_conf.hide_unresolved && entry->sym == NULL)
		return 0;

	if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
		return 0;

	/*
	 * Convert entry->ip from a virtual address to an offset in
	 * its corresponding binary.
	 */
	if (entry->map)
		addr = map__map_ip(entry->map, entry->ip);

	srcline = callchain_srcline(entry->map, entry->sym, addr);
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym,
				       false, NULL, 0, 0, 0, srcline);
}
static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}
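/*
 * Editor's sketch (assumption: recording-side setup): DWARF post
 * unwind requires both user registers and a user stack snapshot in
 * each sample, i.e. an event opened roughly like:
 *
 *	attr.sample_type |= PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
 *	attr.sample_regs_user = PERF_REGS_MASK;	   (arch-defined mask)
 *	attr.sample_stack_user = 8192;		   (bytes of stack to copy)
 */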
int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}
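/*
 * Editor's note: the two passes are ordered by callchain_param.order
 * so that cursor entries come out in display order: callee-first
 * appends the sampled chain before the DWARF-unwound frames, while
 * caller-first unwinds before appending the sampled chain.
 */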
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct threads *threads;
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		threads = &machine->threads[i];
		for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
			thread = rb_entry(nd, struct thread, rb_node);
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}

		list_for_each_entry(thread, &threads->dead, node) {
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}
	}
	return rc;
}
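/*
 * Editor's usage sketch (hypothetical callback): the walk covers the
 * live threads in each rbtree bucket and then the dead-but-still-
 * referenced list; a non-zero return stops it early:
 *
 *	static int count_thread(struct thread *thread __maybe_unused,
 *				void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	machine__for_each_thread(machine, count_thread, &n);
 */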
int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int proc_map_timeout,
				  unsigned int nr_threads_synthesize)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine,
							 data_mmap, proc_map_timeout);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process,
						      machine, data_mmap,
						      proc_map_timeout,
						      nr_threads_synthesize);
	/* command specified */
	return 0;
}
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}
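/*
 * Editor's note: current_tid[] is a lazily allocated MAX_NR_CPUS
 * array mapping each cpu to the task last seen running on it, with
 * -1 meaning "unknown". Cpu-wide records that lack a tid can then be
 * attributed, e.g. (illustrative):
 *
 *	pid_t tid = machine__get_current_tid(machine, sample->cpu);
 *	if (tid != -1)
 *		thread = machine__findnew_thread(machine, -1, tid);
 */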
/*
 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
 * normalized arch is needed.
 */
bool machine__is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
}

int machine__nr_cpus_avail(struct machine *machine)
{
	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
}
int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		/*
		 * On x86_64, PTI entry trampolines are less than the
		 * start of kernel text, but still above 2^63. So leave
		 * kernel_start = 1ULL << 63 for x86_64.
		 */
		if (!err && !machine__is(machine, "x86_64"))
			machine->kernel_start = map->start;
	}
	return err;
}
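/*
 * Editor's note: keeping kernel_start at 1ULL << 63 makes the
 * kernel/user test a single compare (ip >= machine->kernel_start,
 * see machine__kernel_ip()); on x86_64 the default is retained even
 * when the kernel map is known so PTI entry trampolines, which sit
 * below the start of kernel text but above 2^63, still classify as
 * kernel addresses.
 */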
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}