symbol.c

#include <dirent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include "symbol.h"
#include "strlist.h"
#include <elf.h>
#include <limits.h>
#include <symbol/kallsyms.h>
#include <sys/utsname.h>
static int dso__load_kernel_sym(struct dso *dso, struct map *map,
				symbol_filter_t filter);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
				      symbol_filter_t filter);
int vmlinux_path__nr_entries;
char **vmlinux_path;

struct symbol_conf symbol_conf = {
	.use_modules		= true,
	.try_vmlinux_path	= true,
	.annotate_src		= true,
	.demangle		= true,
	.symfs			= "",
};
static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
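
/*
 * Map a one-letter nm/kallsyms symbol type onto the perf map type it
 * belongs to: text symbols ('T'/'W') go into MAP__FUNCTION, data symbols
 * ('D') into MAP__VARIABLE.
 */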
bool symbol_type__is_a(char symbol_type, enum map_type map_type)
{
	symbol_type = toupper(symbol_type);

	switch (map_type) {
	case MAP__FUNCTION:
		return symbol_type == 'T' || symbol_type == 'W';
	case MAP__VARIABLE:
		return symbol_type == 'D';
	default:
		return false;
	}
}
static int prefix_underscores_count(const char *str)
{
	const char *tail = str;

	while (*tail == '_')
		tail++;

	return tail - str;
}
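
/*
 * When two symbols share the same start address only one of them is kept.
 * Ties are broken, in order, by: non-zero length, non-weak binding, global
 * binding, fewer leading underscores, longer name, and finally avoiding the
 * "SyS"/"compat_SyS" syscall aliases.
 */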
#define SYMBOL_A 0
#define SYMBOL_B 1

static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
	s64 a;
	s64 b;
	size_t na, nb;

	/* Prefer a symbol with non-zero length */
	a = syma->end - syma->start;
	b = symb->end - symb->start;
	if ((b == 0) && (a > 0))
		return SYMBOL_A;
	else if ((a == 0) && (b > 0))
		return SYMBOL_B;

	/* Prefer a non-weak symbol over a weak one */
	a = syma->binding == STB_WEAK;
	b = symb->binding == STB_WEAK;
	if (b && !a)
		return SYMBOL_A;
	if (a && !b)
		return SYMBOL_B;

	/* Prefer a global symbol over a non-global one */
	a = syma->binding == STB_GLOBAL;
	b = symb->binding == STB_GLOBAL;
	if (a && !b)
		return SYMBOL_A;
	if (b && !a)
		return SYMBOL_B;

	/* Prefer a symbol with fewer leading underscores */
	a = prefix_underscores_count(syma->name);
	b = prefix_underscores_count(symb->name);
	if (b > a)
		return SYMBOL_A;
	else if (a > b)
		return SYMBOL_B;

	/* Choose the symbol with the longest name */
	na = strlen(syma->name);
	nb = strlen(symb->name);
	if (na > nb)
		return SYMBOL_A;
	else if (na < nb)
		return SYMBOL_B;

	/* Avoid "SyS" kernel syscall aliases */
	if (na >= 3 && !strncmp(syma->name, "SyS", 3))
		return SYMBOL_B;
	if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}
void symbols__fixup_duplicate(struct rb_root *symbols)
{
	struct rb_node *nd;
	struct symbol *curr, *next;

	nd = rb_first(symbols);

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		next = rb_entry(nd, struct symbol, rb_node);

		if (!nd)
			break;

		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
			rb_erase(&next->rb_node, symbols);
			symbol__delete(next);
			goto again;
		} else {
			nd = rb_next(&curr->rb_node);
			rb_erase(&curr->rb_node, symbols);
			symbol__delete(curr);
		}
	}
}
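
/*
 * kallsyms and perf map files carry no symbol sizes, so symbols arrive here
 * with a zero length.  Synthesize each such symbol's end from the start of
 * the following symbol, and round the very last one up to the next page.
 */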
void symbols__fixup_end(struct rb_root *symbols)
{
	struct rb_node *nd, *prevnd = rb_first(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		if (prev->end == prev->start && prev->end != curr->start)
			prev->end = curr->start - 1;
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096);
}

void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
	struct map *prev, *curr;
	struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct map, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct map, rb_node);
		prev->end = curr->start - 1;
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * last map's final address.
	 */
	curr->end = ~0ULL;
}
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size)
		sym = ((void *)sym) + symbol_conf.priv_size;

	sym->start   = start;
	sym->end     = len ? start + len - 1 : start;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}

void symbol__delete(struct symbol *sym)
{
	free(((void *)sym) - symbol_conf.priv_size);
}

size_t symbol__fprintf(struct symbol *sym, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
		       sym->start, sym->end,
		       sym->binding == STB_GLOBAL ? 'g' :
		       sym->binding == STB_LOCAL  ? 'l' : 'w',
		       sym->name);
}

size_t symbol__fprintf_symname_offs(const struct symbol *sym,
				    const struct addr_location *al, FILE *fp)
{
	unsigned long offset;
	size_t length;

	if (sym && sym->name) {
		length = fprintf(fp, "%s", sym->name);
		if (al) {
			if (al->addr < sym->end)
				offset = al->addr - sym->start;
			else
				offset = al->addr - al->map->start - sym->start;
			length += fprintf(fp, "+0x%lx", offset);
		}
		return length;
	} else
		return fprintf(fp, "[unknown]");
}

size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
{
	return symbol__fprintf_symname_offs(sym, NULL, fp);
}

void symbols__delete(struct rb_root *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}
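
/*
 * Plain rb-tree insertion keyed on the symbol start address.  Duplicate
 * start addresses are allowed here; symbols__fixup_duplicate() is expected
 * to resolve them afterwards.
 */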
void symbols__insert(struct rb_root *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color(&sym->rb_node, symbols);
}

static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		struct symbol *s = rb_entry(n, struct symbol, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip > s->end)
			n = n->rb_right;
		else
			return s;
	}

	return NULL;
}

static struct symbol *symbols__first(struct rb_root *symbols)
{
	struct rb_node *n = rb_first(symbols);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

struct symbol_name_rb_node {
	struct rb_node	rb_node;
	struct symbol	sym;
};

static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	struct symbol_name_rb_node *symn, *s;

	symn = container_of(sym, struct symbol_name_rb_node, sym);

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
		if (strcmp(sym->name, s->sym.name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&symn->rb_node, parent, p);
	rb_insert_color(&symn->rb_node, symbols);
}

static void symbols__sort_by_name(struct rb_root *symbols,
				  struct rb_root *source)
{
	struct rb_node *nd;

	for (nd = rb_first(source); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		symbols__insert_by_name(symbols, pos);
	}
}

static struct symbol *symbols__find_by_name(struct rb_root *symbols,
					    const char *name)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		struct symbol_name_rb_node *s;
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = strcmp(name, s->sym.name);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return &s->sym;
	}

	return NULL;
}

struct symbol *dso__find_symbol(struct dso *dso,
				enum map_type type, u64 addr)
{
	return symbols__find(&dso->symbols[type], addr);
}

static struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
{
	return symbols__first(&dso->symbols[type]);
}

struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
					const char *name)
{
	return symbols__find_by_name(&dso->symbol_names[type], name);
}

void dso__sort_by_name(struct dso *dso, enum map_type type)
{
	dso__set_sorted_by_name(dso, type);
	return symbols__sort_by_name(&dso->symbol_names[type],
				     &dso->symbols[type]);
}

size_t dso__fprintf_symbols_by_name(struct dso *dso,
				    enum map_type type, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;
	struct symbol_name_rb_node *pos;

	for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
		fprintf(fp, "%s\n", pos->sym.name);
	}

	return ret;
}
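
/*
 * Parse a /proc/modules style file.  Each line looks like
 *	"name size refcount deps state 0xaddress"
 * and process_module() is called with the module name wrapped in brackets
 * ("[name]") and the load address parsed from the trailing hex field.
 */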
int modules__parse(const char *filename, void *arg,
		   int (*process_module)(void *arg, const char *name,
					 u64 start))
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int err = 0;

	file = fopen(filename, "r");
	if (file == NULL)
		return -1;

	while (1) {
		char name[PATH_MAX];
		u64 start;
		char *sep;
		ssize_t line_len;

		line_len = getline(&line, &n, file);
		if (line_len < 0) {
			if (feof(file))
				break;
			err = -1;
			goto out;
		}

		if (!line) {
			err = -1;
			goto out;
		}

		line[--line_len] = '\0'; /* \n */

		sep = strrchr(line, 'x');
		if (sep == NULL)
			continue;

		hex2u64(sep + 1, &start);

		sep = strchr(line, ' ');
		if (sep == NULL)
			continue;

		*sep = '\0';

		scnprintf(name, sizeof(name), "[%s]", line);

		err = process_module(arg, name, start);
		if (err)
			break;
	}
out:
	free(line);
	fclose(file);
	return err;
}

struct process_kallsyms_args {
	struct map *map;
	struct dso *dso;
};

bool symbol__is_idle(struct symbol *sym)
{
	const char * const idle_symbols[] = {
		"cpu_idle",
		"intel_idle",
		"default_idle",
		"native_safe_halt",
		"enter_idle",
		"exit_idle",
		"mwait_idle",
		"mwait_idle_with_hints",
		"poll_idle",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		NULL
	};
	int i;

	if (!sym)
		return false;

	for (i = 0; idle_symbols[i]; i++) {
		if (!strcmp(idle_symbols[i], sym->name))
			return true;
	}

	return false;
}

static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct process_kallsyms_args *a = arg;
	struct rb_root *root = &a->dso->symbols[a->map->type];

	if (!symbol_type__is_a(type, a->map->type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	symbols__insert(root, sym);

	return 0;
}

/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
				  struct map *map)
{
	struct process_kallsyms_args args = { .map = map, .dso = dso, };

	return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
}
static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
					 symbol_filter_t filter)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct map *curr_map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, map->type, pos->start);

		if (!curr_map || (filter && filter(curr_map, pos))) {
			rb_erase(&pos->rb_node, root);
			symbol__delete(pos);
		} else {
			pos->start -= curr_map->start - curr_map->pgoff;
			if (pos->end)
				pos->end -= curr_map->start - curr_map->pgoff;
			if (curr_map != map) {
				rb_erase(&pos->rb_node, root);
				symbols__insert(
					&curr_map->dso->symbols[curr_map->type],
					pos);
				++moved;
			} else {
				++count;
			}
		}
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count + moved;
}
/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken into several maps, named [kernel].N, as we don't
 * have the original ELF section names that vmlinux has.
 */
static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
			       symbol_filter_t filter)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct map *curr_map = map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);
	int kernel_range = 0;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module
					 * are continuous in kallsyms, so
					 * curr_map points to a module and all
					 * its symbols are in its kmap. Mark
					 * it as loaded.
					 */
					dso__set_loaded(curr_map->dso,
							curr_map->type);
				}

				curr_map = map_groups__find_by_name(kmaps,
							map->type, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end   = curr_map->map_ip(curr_map, pos->end);
		} else if (curr_map != map) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				curr_map = map;
				goto filter_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					 "[guest.kernel].%d",
					 kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					 "[kernel].%d",
					 kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso, map->type);
			if (curr_map == NULL) {
				dso__delete(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			++kernel_range;
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
filter_symbol:
		if (filter && filter(curr_map, pos)) {
discard_symbol:		rb_erase(&pos->rb_node, root);
			symbol__delete(pos);
		} else {
			if (curr_map != map) {
				rb_erase(&pos->rb_node, root);
				symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
				++moved;
			} else
				++count;
		}
	}

	if (curr_map != map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso, curr_map->type);
	}

	return count + moved;
}
bool symbol__restricted_filename(const char *filename,
				 const char *restricted_filename)
{
	bool restricted = false;

	if (symbol_conf.kptr_restrict) {
		char *r = realpath(filename, NULL);

		if (r != NULL) {
			restricted = strcmp(r, restricted_filename) == 0;
			free(r);
			return restricted;
		}
	}

	return restricted;
}
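
/*
 * Small rb-tree of loaded modules keyed by name, filled from a
 * /proc/modules style file.  It is used below to check that a kcore image
 * still matches the modules that are currently loaded.
 */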
struct module_info {
	struct rb_node rb_node;
	char *name;
	u64 start;
};

static void add_module(struct module_info *mi, struct rb_root *modules)
{
	struct rb_node **p = &modules->rb_node;
	struct rb_node *parent = NULL;
	struct module_info *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct module_info, rb_node);
		if (strcmp(mi->name, m->name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&mi->rb_node, parent, p);
	rb_insert_color(&mi->rb_node, modules);
}

static void delete_modules(struct rb_root *modules)
{
	struct module_info *mi;
	struct rb_node *next = rb_first(modules);

	while (next) {
		mi = rb_entry(next, struct module_info, rb_node);
		next = rb_next(&mi->rb_node);
		rb_erase(&mi->rb_node, modules);
		zfree(&mi->name);
		free(mi);
	}
}

static struct module_info *find_module(const char *name,
				       struct rb_root *modules)
{
	struct rb_node *n = modules->rb_node;

	while (n) {
		struct module_info *m;
		int cmp;

		m = rb_entry(n, struct module_info, rb_node);
		cmp = strcmp(name, m->name);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return m;
	}

	return NULL;
}

static int __read_proc_modules(void *arg, const char *name, u64 start)
{
	struct rb_root *modules = arg;
	struct module_info *mi;

	mi = zalloc(sizeof(struct module_info));
	if (!mi)
		return -ENOMEM;

	mi->name = strdup(name);
	mi->start = start;

	if (!mi->name) {
		free(mi);
		return -ENOMEM;
	}

	add_module(mi, modules);
	return 0;
}

static int read_proc_modules(const char *filename, struct rb_root *modules)
{
	if (symbol__restricted_filename(filename, "/proc/modules"))
		return -1;

	if (modules__parse(filename, modules, __read_proc_modules)) {
		delete_modules(modules);
		return -1;
	}

	return 0;
}
int compare_proc_modules(const char *from, const char *to)
{
	struct rb_root from_modules = RB_ROOT;
	struct rb_root to_modules = RB_ROOT;
	struct rb_node *from_node, *to_node;
	struct module_info *from_m, *to_m;
	int ret = -1;

	if (read_proc_modules(from, &from_modules))
		return -1;

	if (read_proc_modules(to, &to_modules))
		goto out_delete_from;

	from_node = rb_first(&from_modules);
	to_node = rb_first(&to_modules);

	while (from_node) {
		if (!to_node)
			break;

		from_m = rb_entry(from_node, struct module_info, rb_node);
		to_m = rb_entry(to_node, struct module_info, rb_node);

		if (from_m->start != to_m->start ||
		    strcmp(from_m->name, to_m->name))
			break;

		from_node = rb_next(from_node);
		to_node = rb_next(to_node);
	}

	if (!from_node && !to_node)
		ret = 0;

	delete_modules(&to_modules);
out_delete_from:
	delete_modules(&from_modules);

	return ret;
}

static int do_validate_kcore_modules(const char *filename, struct map *map,
				     struct map_groups *kmaps)
{
	struct rb_root modules = RB_ROOT;
	struct map *old_map;
	int err;

	err = read_proc_modules(filename, &modules);
	if (err)
		return err;

	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		struct map *next = map_groups__next(old_map);
		struct module_info *mi;

		if (old_map == map || old_map->start == map->start) {
			/* The kernel map */
			old_map = next;
			continue;
		}

		/* Module must be in memory at the same address */
		mi = find_module(old_map->dso->short_name, &modules);
		if (!mi || mi->start != old_map->start) {
			err = -EINVAL;
			goto out;
		}

		old_map = next;
	}
out:
	delete_modules(&modules);
	return err;
}
/*
 * If kallsyms is referenced by name then we look for filename in the same
 * directory.
 */
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *name;

	strcpy(filename, kallsyms_filename);
	name = strrchr(filename, '/');
	if (!name)
		return false;
	name += 1;

	if (!strcmp(name, "kallsyms")) {
		strcpy(name, base_name);
		return true;
	}

	return false;
}

static int validate_kcore_modules(const char *kallsyms_filename,
				  struct map *map)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	char modules_filename[PATH_MAX];

	if (!filename_from_kallsyms_filename(modules_filename, "modules",
					     kallsyms_filename))
		return -EINVAL;

	if (do_validate_kcore_modules(modules_filename, map, kmaps))
		return -EINVAL;

	return 0;
}

static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		start = kallsyms__get_function_start(kallsyms_filename,
						     kmap->ref_reloc_sym->name);
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}
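
/*
 * kcore_mapfn() is the callback handed to file__read_maps(): for each
 * (start, len, pgoff) mapping found in the kcore image it creates a
 * temporary struct map and collects it on md->maps, so that
 * dso__load_kcore() can later swap these in for the existing kernel maps.
 */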
struct kcore_mapfn_data {
	struct dso *dso;
	enum map_type type;
	struct list_head maps;
};

static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map *map;

	map = map__new2(start, md->dso, md->type);
	if (map == NULL)
		return -ENOMEM;

	map->end = map->start + len;
	map->pgoff = pgoff;

	list_add(&map->node, &md->maps);

	return 0;
}
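
/*
 * Replace the kallsyms-derived kernel maps with maps read from kcore:
 * validate that the kcore image matches the recorded kernel and modules,
 * read its maps, substitute them for the old kernel maps and mark the dso
 * so its object code can be read via dso__data_read_addr().
 */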
static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	struct symbol *sym;

	/* This function requires that the map is the kernel map */
	if (map != machine->vmlinux_maps[map->type])
		return -EINVAL;

	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	md.type = map->type;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0)
		return -EINVAL;

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		struct map *next = map_groups__next(old_map);

		if (old_map != map)
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}

	/* Find the kernel map using the first symbol */
	sym = dso__first_symbol(dso, map->type);
	list_for_each_entry(new_map, &md.maps, node) {
		if (sym && sym->start >= new_map->start &&
		    sym->start < new_map->end) {
			replacement_map = new_map;
			break;
		}
	}

	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
		list_del(&new_map->node);
		if (new_map == replacement_map) {
			map->start	= new_map->start;
			map->end	= new_map->end;
			map->pgoff	= new_map->pgoff;
			map->map_ip	= new_map->map_ip;
			map->unmap_ip	= new_map->unmap_ip;
			map__delete(new_map);
			/* Ensure maps are correctly ordered */
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
		} else {
			map_groups__insert(kmaps, new_map);
		}
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
	else
		dso->binary_type = DSO_BINARY_TYPE__KCORE;
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map->type == MAP__FUNCTION)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
		list_del(&map->node);
		map__delete(map);
	}
	close(fd);
	return -EINVAL;
}
/*
 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
 * delta based on the relocation reference symbol.
 */
static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
{
	struct kmap *kmap = map__kmap(map);
	u64 addr;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
		return 0;

	addr = kallsyms__get_function_start(filename,
					    kmap->ref_reloc_sym->name);
	if (!addr)
		return -1;

	*delta = addr - kmap->ref_reloc_sym->addr;
	return 0;
}

int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map, symbol_filter_t filter)
{
	u64 delta = 0;

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (dso__load_all_kallsyms(dso, filename, map) < 0)
		return -1;

	if (kallsyms__delta(map, filename, &delta))
		return -1;

	symbols__fixup_duplicate(&dso->symbols[map->type]);
	symbols__fixup_end(&dso->symbols[map->type]);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
	else
		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;

	if (!dso__load_kcore(dso, map, filename))
		return dso__split_kallsyms_for_kcore(dso, map, filter);
	else
		return dso__split_kallsyms(dso, map, delta, filter);
}
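
/*
 * /tmp/perf-$PID.map files written by JITs are plain text, one symbol per
 * line in the form "<start-hex> <size-hex> <name>".  Parse each line into a
 * symbol and insert it into the map's dso.
 */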
static int dso__load_perf_map(struct dso *dso, struct map *map,
			      symbol_filter_t filter)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(dso->long_name, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, line + len);

		if (sym == NULL)
			goto out_delete_line;

		if (filter && filter(map, sym))
			symbol__delete(sym);
		else {
			symbols__insert(&dso->symbols[map->type], sym);
			nr_syms++;
		}
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}

static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
		return !kmod && dso->kernel == DSO_TYPE_USER;

	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso->kernel == DSO_TYPE_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso->kernel == DSO_TYPE_GUEST_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
		/*
		 * kernel modules know their symtab type - it's set when
		 * creating a module dso in machine__new_module().
		 */
		return kmod && dso->symtab_type == type;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		return true;

	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return false;
	}
}
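
/*
 * Main symbol-loading entry point for a dso.  Kernel and guest-kernel dsos
 * are dispatched to their dedicated loaders; /tmp/perf-*.map files are
 * parsed directly; everything else walks binary_type_symtab[] looking for
 * one image that provides a symbol table (syms_ss) and one that matches the
 * runtime binary (runtime_ss), then hands both to dso__load_sym().
 */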
int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;

	dso__set_loaded(dso, map->type);

	if (dso->kernel == DSO_TYPE_KERNEL)
		return dso__load_kernel_sym(dso, map, filter);
	else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		return dso__load_guest_kernel_sym(dso, map, filter);

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	dso->adjust_symbols = 0;

	if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
		struct stat st;

		if (lstat(dso->name, &st) < 0)
			return -1;

		if (st.st_uid && (st.st_uid != geteuid())) {
			pr_warning("File %s not owned by current user or root, "
				   "ignoring it.\n", dso->name);
			return -1;
		}

		ret = dso__load_perf_map(dso, map, filter);
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
		return ret;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		return -1;

	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;

	/*
	 * Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		/* Name is now the name of the next image to try */
		if (symsrc__init(ss, dso, name, symtab_type) < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso->symsrc_filename)
				dso->symsrc_filename = strdup(name);
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			if (syms_ss && runtime_ss)
				break;
		} else {
			symsrc__destroy(ss);
		}
	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	if (syms_ss)
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod);
	else
		ret = -1;

	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
		return 0;
	return ret;
}
struct map *map_groups__find_by_name(struct map_groups *mg,
				     enum map_type type, const char *name)
{
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);

		if (map->dso && strcmp(map->dso->short_name, name) == 0)
			return map;
	}

	return NULL;
}

int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, bool vmlinux_allocated,
		      symbol_filter_t filter)
{
	int err = -1;
	struct symsrc ss;
	char symfs_vmlinux[PATH_MAX];
	enum dso_binary_type symtab_type;

	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
			 symbol_conf.symfs, vmlinux);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
	else
		symtab_type = DSO_BINARY_TYPE__VMLINUX;

	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
		return -1;

	err = dso__load_sym(dso, map, &ss, &ss, filter, 0);
	symsrc__destroy(&ss);

	if (err > 0) {
		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
		else
			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
		dso__set_loaded(dso, map->type);
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
	}

	return err;
}

int dso__load_vmlinux_path(struct dso *dso, struct map *map,
			   symbol_filter_t filter)
{
	int i, err = 0;
	char *filename;

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

	filename = dso__build_id_filename(dso, NULL, 0);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, true, filter);
		if (err > 0)
			goto out;
		free(filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
		if (err > 0)
			break;
	}
out:
	return err;
}

static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
	char kallsyms_filename[PATH_MAX];
	struct dirent *dent;
	int ret = -1;
	DIR *d;

	d = opendir(dir);
	if (!d)
		return -1;

	while (1) {
		dent = readdir(d);
		if (!dent)
			break;
		if (dent->d_type != DT_DIR)
			continue;
		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
			  "%s/%s/kallsyms", dir, dent->d_name);
		if (!validate_kcore_addresses(kallsyms_filename, map)) {
			strlcpy(dir, kallsyms_filename, dir_sz);
			ret = 0;
			break;
		}
	}

	closedir(d);

	return ret;
}
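
/*
 * Pick the kallsyms file to use for the kernel dso.  Without a build-id we
 * fall back to /proc/kallsyms.  If the build-id matches the running kernel,
 * prefer /proc/kallsyms when /proc/kcore is usable, otherwise a kcore copy
 * cached under the build-id cache.  For a non-matching build-id only the
 * build-id cache entry is acceptable.
 */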
static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	u8 host_build_id[BUILD_ID_SIZE];
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso->has_build_id) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
				 sizeof(host_build_id)) == 0)
		is_host = dso__build_id_equal(dso, host_build_id);

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

	scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
		  sbuild_id);

	/* Use /proc/kallsyms if possible */
	if (is_host) {
		DIR *d;
		int fd;

		/* If no cached kcore go with /proc/kallsyms */
		d = opendir(path);
		if (!d)
			goto proc_kallsyms;
		closedir(d);

		/*
		 * Do not check the build-id cache, until we know we cannot use
		 * /proc/kcore.
		 */
		fd = open("/proc/kcore", O_RDONLY);
		if (fd != -1) {
			close(fd);
			/* If module maps match go with /proc/kallsyms */
			if (!validate_kcore_addresses("/proc/kallsyms", map))
				goto proc_kallsyms;
		}

		/* Find kallsyms in build-id cache with kcore */
		if (!find_matching_kcore(map, path, sizeof(path)))
			return strdup(path);

		goto proc_kallsyms;
	}

	/* Find kallsyms in build-id cache with kcore */
	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

	scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
		  buildid_dir, sbuild_id);

	if (access(path, F_OK)) {
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);

proc_kallsyms:
	return strdup("/proc/kallsyms");
}
static int dso__load_kernel_sym(struct dso *dso, struct map *map,
				symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fall back to another vmlinux (an
	 * x86_64 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name,
					 false, filter);
	}

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map, filter);
		if (err > 0)
			return err;
	}

	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	if (err > 0 && !dso__is_kcore(dso)) {
		dso__set_long_name(dso, "[kernel.kallsyms]", false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
				      symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	struct machine *machine;
	char path[PATH_MAX];

	if (!map->groups) {
		pr_debug("Guest kernel map has no groups pointer\n");
		return -1;
	}
	machine = map->groups->machine;

	if (machine__is_default_guest(machine)) {
		/*
		 * If the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Otherwise use the guest kallsyms file given by the user on
		 * the command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false, filter);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		machine__mmap_name(machine, path, sizeof(path));
		dso__set_long_name(dso, strdup(path), true);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);

	zfree(&vmlinux_path);
}
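
/*
 * Build the default list of vmlinux candidates: "vmlinux" in the current
 * directory, /boot/vmlinux, and (unless a symfs was given) the running
 * kernel's /boot/vmlinux-<release>, /lib/modules/<release>/build/vmlinux
 * and /usr/lib/debug/lib/modules/<release>/vmlinux.
 */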
static int vmlinux_path__init(void)
{
	struct utsname uts;
	char bf[PATH_MAX];

	vmlinux_path = malloc(sizeof(char *) * 5);
	if (vmlinux_path == NULL)
		return -1;

	vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;

	/* only try running kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (uname(&uts) < 0)
		return -1;

	snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
		 uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

int setup_list(struct strlist **list, const char *list_str,
	       const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(true, list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}
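
/*
 * Non-root users see zeroed addresses in /proc/kallsyms when
 * kernel.kptr_restrict is set; remember that so the kallsyms and modules
 * readers can refuse restricted files instead of loading bogus symbols.
 */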
static bool symbol__read_kptr_restrict(void)
{
	bool value = false;

	if (geteuid() != 0) {
		FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");

		if (fp != NULL) {
			char line[8];

			if (fgets(line, sizeof(line), fp) != NULL)
				value = atoi(line) != 0;

			fclose(fp);
		}
	}

	return value;
}

int symbol__init(void)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is not a valid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_comm_list;

	/*
	 * A symfs path of "/" is identical to "";
	 * reset it here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.initialized = false;
}