symbol.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <dirent.h>
  3. #include <errno.h>
  4. #include <stdlib.h>
  5. #include <stdio.h>
  6. #include <string.h>
  7. #include <linux/kernel.h>
  8. #include <sys/types.h>
  9. #include <sys/stat.h>
  10. #include <sys/param.h>
  11. #include <fcntl.h>
  12. #include <unistd.h>
  13. #include <inttypes.h>
  14. #include "annotate.h"
  15. #include "build-id.h"
  16. #include "util.h"
  17. #include "debug.h"
  18. #include "machine.h"
  19. #include "symbol.h"
  20. #include "strlist.h"
  21. #include "intlist.h"
  22. #include "namespaces.h"
  23. #include "header.h"
  24. #include "path.h"
  25. #include "sane_ctype.h"
  26. #include <elf.h>
  27. #include <limits.h>
  28. #include <symbol/kallsyms.h>
  29. #include <sys/utsname.h>
  30. static int dso__load_kernel_sym(struct dso *dso, struct map *map);
  31. static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
  32. static bool symbol__is_idle(const char *name);
  33. int vmlinux_path__nr_entries;
  34. char **vmlinux_path;
/*
 * Global symbol-handling configuration.  These are the defaults; the
 * individual tools override fields from their command line options.
 */
struct symbol_conf symbol_conf = {
	.use_modules = true,		/* consult /proc/modules for kernel module maps */
	.try_vmlinux_path = true,	/* search the vmlinux_path[] candidates */
	.annotate_src = true,
	.demangle = true,
	.demangle_kernel = false,
	.cumulate_callchain = true,
	.show_hist_headers = true,
	.symfs = "",			/* no alternate symbol filesystem root by default */
	.event_group = true,
	.inline_name = true,
};
/*
 * Probe order used when looking for a symbol table for a DSO:
 * entries are tried first to last, ending at the NOT_FOUND sentinel.
 */
static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

/* Number of entries above, including the NOT_FOUND sentinel. */
#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
  66. bool symbol_type__is_a(char symbol_type, enum map_type map_type)
  67. {
  68. symbol_type = toupper(symbol_type);
  69. switch (map_type) {
  70. case MAP__FUNCTION:
  71. return symbol_type == 'T' || symbol_type == 'W';
  72. case MAP__VARIABLE:
  73. return symbol_type == 'D';
  74. default:
  75. return false;
  76. }
  77. }
  78. static int prefix_underscores_count(const char *str)
  79. {
  80. const char *tail = str;
  81. while (*tail == '_')
  82. tail++;
  83. return tail - str;
  84. }
/* Weak default: no arch-specific symbol name normalization, return as-is. */
const char * __weak arch__normalize_symbol_name(const char *name)
{
	return name;
}
/* Weak default symbol name comparison: plain strcmp() semantics. */
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}
/* Weak default length-bounded symbol name comparison: strncmp() semantics. */
int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
					unsigned int n)
{
	return strncmp(namea, nameb, n);
}
  98. int __weak arch__choose_best_symbol(struct symbol *syma,
  99. struct symbol *symb __maybe_unused)
  100. {
  101. /* Avoid "SyS" kernel syscall aliases */
  102. if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
  103. return SYMBOL_B;
  104. if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
  105. return SYMBOL_B;
  106. return SYMBOL_A;
  107. }
  108. static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
  109. {
  110. s64 a;
  111. s64 b;
  112. size_t na, nb;
  113. /* Prefer a symbol with non zero length */
  114. a = syma->end - syma->start;
  115. b = symb->end - symb->start;
  116. if ((b == 0) && (a > 0))
  117. return SYMBOL_A;
  118. else if ((a == 0) && (b > 0))
  119. return SYMBOL_B;
  120. /* Prefer a non weak symbol over a weak one */
  121. a = syma->binding == STB_WEAK;
  122. b = symb->binding == STB_WEAK;
  123. if (b && !a)
  124. return SYMBOL_A;
  125. if (a && !b)
  126. return SYMBOL_B;
  127. /* Prefer a global symbol over a non global one */
  128. a = syma->binding == STB_GLOBAL;
  129. b = symb->binding == STB_GLOBAL;
  130. if (a && !b)
  131. return SYMBOL_A;
  132. if (b && !a)
  133. return SYMBOL_B;
  134. /* Prefer a symbol with less underscores */
  135. a = prefix_underscores_count(syma->name);
  136. b = prefix_underscores_count(symb->name);
  137. if (b > a)
  138. return SYMBOL_A;
  139. else if (a > b)
  140. return SYMBOL_B;
  141. /* Choose the symbol with the longest name */
  142. na = strlen(syma->name);
  143. nb = strlen(symb->name);
  144. if (na > nb)
  145. return SYMBOL_A;
  146. else if (na < nb)
  147. return SYMBOL_B;
  148. return arch__choose_best_symbol(syma, symb);
  149. }
  150. void symbols__fixup_duplicate(struct rb_root *symbols)
  151. {
  152. struct rb_node *nd;
  153. struct symbol *curr, *next;
  154. if (symbol_conf.allow_aliases)
  155. return;
  156. nd = rb_first(symbols);
  157. while (nd) {
  158. curr = rb_entry(nd, struct symbol, rb_node);
  159. again:
  160. nd = rb_next(&curr->rb_node);
  161. next = rb_entry(nd, struct symbol, rb_node);
  162. if (!nd)
  163. break;
  164. if (curr->start != next->start)
  165. continue;
  166. if (choose_best_symbol(curr, next) == SYMBOL_A) {
  167. rb_erase(&next->rb_node, symbols);
  168. symbol__delete(next);
  169. goto again;
  170. } else {
  171. nd = rb_next(&curr->rb_node);
  172. rb_erase(&curr->rb_node, symbols);
  173. symbol__delete(curr);
  174. }
  175. }
  176. }
/*
 * Give zero-length symbols an end address: each such symbol ends where
 * the next one starts, and the last one is extended one page past its
 * rounded-up start, since we have nothing better to guess with.
 */
void symbols__fixup_end(struct rb_root *symbols)
{
	struct rb_node *nd, *prevnd = rb_first(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		/* Only patch symbols whose length is still unknown (end == start). */
		if (prev->end == prev->start && prev->end != curr->start)
			prev->end = curr->start;
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096) + 4096;
}
/*
 * Close the ->end address of every map of @type: each map is assumed to
 * end where the next one starts, and the last map is extended to the end
 * of the address space.  Takes the maps lock for writing.
 */
void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
	struct maps *maps = &mg->maps[type];
	struct map *next, *curr;

	down_write(&maps->lock);

	curr = maps__first(maps);
	if (curr == NULL)
		goto out_unlock;

	for (next = map__next(curr); next; next = map__next(curr)) {
		if (!curr->end)
			curr->end = next->start;
		curr = next;
	}

	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
	if (!curr->end)
		curr->end = ~0ULL;

out_unlock:
	up_write(&maps->lock);
}
/*
 * Allocate and initialize a struct symbol.
 *
 * The allocation is laid out as [private area][struct symbol][name]:
 * symbol_conf.priv_size bytes of tool-private data sit *before* the
 * symbol, and the name is copied into the flexible array at its tail.
 * The returned pointer is to the struct symbol itself, so
 * symbol__delete() must back up by priv_size before freeing.
 *
 * A zero @len leaves end == start; symbols__fixup_end() patches that up
 * later.
 */
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			/* The private area begins with a struct annotation. */
			struct annotation *notes = (void *)sym;

			pthread_mutex_init(&notes->lock, NULL);
		}
		sym = ((void *)sym) + symbol_conf.priv_size;
	}

	sym->start = start;
	sym->end = len ? start + len : start;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}
/* Free a symbol allocated by symbol__new(), including its private area. */
void symbol__delete(struct symbol *sym)
{
	free(((void *)sym) - symbol_conf.priv_size);
}
  243. void symbols__delete(struct rb_root *symbols)
  244. {
  245. struct symbol *pos;
  246. struct rb_node *next = rb_first(symbols);
  247. while (next) {
  248. pos = rb_entry(next, struct symbol, rb_node);
  249. next = rb_next(&pos->rb_node);
  250. rb_erase(&pos->rb_node, symbols);
  251. symbol__delete(pos);
  252. }
  253. }
  254. void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
  255. {
  256. struct rb_node **p = &symbols->rb_node;
  257. struct rb_node *parent = NULL;
  258. const u64 ip = sym->start;
  259. struct symbol *s;
  260. if (kernel) {
  261. const char *name = sym->name;
  262. /*
  263. * ppc64 uses function descriptors and appends a '.' to the
  264. * start of every instruction address. Remove it.
  265. */
  266. if (name[0] == '.')
  267. name++;
  268. sym->idle = symbol__is_idle(name);
  269. }
  270. while (*p != NULL) {
  271. parent = *p;
  272. s = rb_entry(parent, struct symbol, rb_node);
  273. if (ip < s->start)
  274. p = &(*p)->rb_left;
  275. else
  276. p = &(*p)->rb_right;
  277. }
  278. rb_link_node(&sym->rb_node, parent, p);
  279. rb_insert_color(&sym->rb_node, symbols);
  280. }
/* Insert into a non-kernel symbol tree: no idle-symbol marking needed. */
void symbols__insert(struct rb_root *symbols, struct symbol *sym)
{
	__symbols__insert(symbols, sym, false);
}
  285. static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
  286. {
  287. struct rb_node *n;
  288. if (symbols == NULL)
  289. return NULL;
  290. n = symbols->rb_node;
  291. while (n) {
  292. struct symbol *s = rb_entry(n, struct symbol, rb_node);
  293. if (ip < s->start)
  294. n = n->rb_left;
  295. else if (ip > s->end || (ip == s->end && ip != s->start))
  296. n = n->rb_right;
  297. else
  298. return s;
  299. }
  300. return NULL;
  301. }
  302. static struct symbol *symbols__first(struct rb_root *symbols)
  303. {
  304. struct rb_node *n = rb_first(symbols);
  305. if (n)
  306. return rb_entry(n, struct symbol, rb_node);
  307. return NULL;
  308. }
  309. static struct symbol *symbols__last(struct rb_root *symbols)
  310. {
  311. struct rb_node *n = rb_last(symbols);
  312. if (n)
  313. return rb_entry(n, struct symbol, rb_node);
  314. return NULL;
  315. }
  316. static struct symbol *symbols__next(struct symbol *sym)
  317. {
  318. struct rb_node *n = rb_next(&sym->rb_node);
  319. if (n)
  320. return rb_entry(n, struct symbol, rb_node);
  321. return NULL;
  322. }
  323. static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
  324. {
  325. struct rb_node **p = &symbols->rb_node;
  326. struct rb_node *parent = NULL;
  327. struct symbol_name_rb_node *symn, *s;
  328. symn = container_of(sym, struct symbol_name_rb_node, sym);
  329. while (*p != NULL) {
  330. parent = *p;
  331. s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
  332. if (strcmp(sym->name, s->sym.name) < 0)
  333. p = &(*p)->rb_left;
  334. else
  335. p = &(*p)->rb_right;
  336. }
  337. rb_link_node(&symn->rb_node, parent, p);
  338. rb_insert_color(&symn->rb_node, symbols);
  339. }
  340. static void symbols__sort_by_name(struct rb_root *symbols,
  341. struct rb_root *source)
  342. {
  343. struct rb_node *nd;
  344. for (nd = rb_first(source); nd; nd = rb_next(nd)) {
  345. struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
  346. symbols__insert_by_name(symbols, pos);
  347. }
  348. }
/*
 * Compare symbol @name against the user-supplied @str, strcmp()-style
 * (0 on match).
 *
 * When only default versions are wanted and @name carries an
 * "@@version" suffix, compare a bounded prefix so that e.g.
 * "foo@@GLIBC_2.2" matches "foo".
 */
int symbol__match_symbol_name(const char *name, const char *str,
			      enum symbol_tag_include includes)
{
	const char *versioning;

	if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
	    (versioning = strstr(name, "@@"))) {
		int len = strlen(str);

		/* Compare at least up to the "@@" so a shorter @str can't match. */
		if (len < versioning - name)
			len = versioning - name;

		return arch__compare_symbol_names_n(name, str, len);
	} else
		return arch__compare_symbol_names(name, str);
}
/*
 * Binary-search the name-sorted tree for @name.
 *
 * On a hit with @includes other than DEFAULT_ONLY, walk backwards over
 * the in-order predecessors so the *first* of several equally named
 * symbols is the one returned.
 */
static struct symbol *symbols__find_by_name(struct rb_root *symbols,
					    const char *name,
					    enum symbol_tag_include includes)
{
	struct rb_node *n;
	struct symbol_name_rb_node *s = NULL;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = symbol__match_symbol_name(s->sym.name, name, includes);

		if (cmp > 0)
			n = n->rb_left;
		else if (cmp < 0)
			n = n->rb_right;
		else
			break;
	}

	/* n == NULL means the search fell off the tree: no match. */
	if (n == NULL)
		return NULL;

	if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
		/* return first symbol that has same name (if any) */
		for (n = rb_prev(n); n; n = rb_prev(n)) {
			struct symbol_name_rb_node *tmp;

			tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
			if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
				break;

			s = tmp;
		}

	return &s->sym;
}
  395. void dso__reset_find_symbol_cache(struct dso *dso)
  396. {
  397. enum map_type type;
  398. for (type = MAP__FUNCTION; type <= MAP__VARIABLE; ++type) {
  399. dso->last_find_result[type].addr = 0;
  400. dso->last_find_result[type].symbol = NULL;
  401. }
  402. }
/*
 * Insert @sym into @dso's tree for @type and refresh the one-entry
 * find cache when the cached address falls inside the new symbol
 * (or exactly on a zero-length one).
 */
void dso__insert_symbol(struct dso *dso, enum map_type type, struct symbol *sym)
{
	__symbols__insert(&dso->symbols[type], sym, dso->kernel);

	/* update the symbol cache if necessary */
	if (dso->last_find_result[type].addr >= sym->start &&
	    (dso->last_find_result[type].addr < sym->end ||
	     sym->start == sym->end)) {
		dso->last_find_result[type].symbol = sym;
	}
}
/*
 * Look up the symbol covering @addr, with a one-entry cache so repeated
 * lookups of the same address skip the tree walk.  A cached NULL is
 * retried, in case the symbol has been inserted meanwhile.
 */
struct symbol *dso__find_symbol(struct dso *dso,
				enum map_type type, u64 addr)
{
	if (dso->last_find_result[type].addr != addr ||
	    dso->last_find_result[type].symbol == NULL) {
		dso->last_find_result[type].addr = addr;
		dso->last_find_result[type].symbol = symbols__find(&dso->symbols[type], addr);
	}

	return dso->last_find_result[type].symbol;
}
/* First (lowest address) symbol of @dso for @type, or NULL if none. */
struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
{
	return symbols__first(&dso->symbols[type]);
}
/* Last (highest address) symbol of @dso for @type, or NULL if none. */
struct symbol *dso__last_symbol(struct dso *dso, enum map_type type)
{
	return symbols__last(&dso->symbols[type]);
}
/* Next symbol in address order, or NULL at the end of the tree. */
struct symbol *dso__next_symbol(struct symbol *sym)
{
	return symbols__next(sym);
}
/*
 * Next symbol in name order.  @sym must be embedded in a
 * struct symbol_name_rb_node, i.e. come from the by-name tree.
 */
struct symbol *symbol__next_by_name(struct symbol *sym)
{
	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
	struct rb_node *n = rb_next(&s->rb_node);

	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
}
/*
 * Returns the first symbol that matched with @name.
 */
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
					const char *name)
{
	/* First try an exact match, version tags and all... */
	struct symbol *s = symbols__find_by_name(&dso->symbol_names[type], name,
						 SYMBOL_TAG_INCLUDE__NONE);
	/* ...then fall back to matching just the default version. */
	if (!s)
		s = symbols__find_by_name(&dso->symbol_names[type], name,
					  SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);

	return s;
}
  453. void dso__sort_by_name(struct dso *dso, enum map_type type)
  454. {
  455. dso__set_sorted_by_name(dso, type);
  456. return symbols__sort_by_name(&dso->symbol_names[type],
  457. &dso->symbols[type]);
  458. }
  459. int modules__parse(const char *filename, void *arg,
  460. int (*process_module)(void *arg, const char *name,
  461. u64 start, u64 size))
  462. {
  463. char *line = NULL;
  464. size_t n;
  465. FILE *file;
  466. int err = 0;
  467. file = fopen(filename, "r");
  468. if (file == NULL)
  469. return -1;
  470. while (1) {
  471. char name[PATH_MAX];
  472. u64 start, size;
  473. char *sep, *endptr;
  474. ssize_t line_len;
  475. line_len = getline(&line, &n, file);
  476. if (line_len < 0) {
  477. if (feof(file))
  478. break;
  479. err = -1;
  480. goto out;
  481. }
  482. if (!line) {
  483. err = -1;
  484. goto out;
  485. }
  486. line[--line_len] = '\0'; /* \n */
  487. sep = strrchr(line, 'x');
  488. if (sep == NULL)
  489. continue;
  490. hex2u64(sep + 1, &start);
  491. sep = strchr(line, ' ');
  492. if (sep == NULL)
  493. continue;
  494. *sep = '\0';
  495. scnprintf(name, sizeof(name), "[%s]", line);
  496. size = strtoul(sep + 1, &endptr, 0);
  497. if (*endptr != ' ' && *endptr != '\t')
  498. continue;
  499. err = process_module(arg, name, start, size);
  500. if (err)
  501. break;
  502. }
  503. out:
  504. free(line);
  505. fclose(file);
  506. return err;
  507. }
/* Closure passed through kallsyms__parse() to the per-symbol callback. */
struct process_kallsyms_args {
	struct map *map;
	struct dso *dso;
};
  512. /*
  513. * These are symbols in the kernel image, so make sure that
  514. * sym is from a kernel DSO.
  515. */
  516. static bool symbol__is_idle(const char *name)
  517. {
  518. const char * const idle_symbols[] = {
  519. "cpu_idle",
  520. "cpu_startup_entry",
  521. "intel_idle",
  522. "default_idle",
  523. "native_safe_halt",
  524. "enter_idle",
  525. "exit_idle",
  526. "mwait_idle",
  527. "mwait_idle_with_hints",
  528. "poll_idle",
  529. "ppc64_runlatch_off",
  530. "pseries_dedicated_idle_sleep",
  531. NULL
  532. };
  533. int i;
  534. for (i = 0; idle_symbols[i]; i++) {
  535. if (!strcmp(idle_symbols[i], name))
  536. return true;
  537. }
  538. return false;
  539. }
/*
 * kallsyms__parse() callback: turn one kallsyms line into a struct
 * symbol in the DSO's tree.  Returns 0 (also for skipped symbol types)
 * or -ENOMEM.
 */
static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct process_kallsyms_args *a = arg;
	struct rb_root *root = &a->dso->symbols[a->map->type];

	/* Skip symbol types that don't belong in this map (text vs data). */
	if (!symbol_type__is_a(type, a->map->type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_binding(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	/* A '[' in the name marks a module symbol, i.e. a non-kernel one. */
	__symbols__insert(root, sym, !strchr(name, '['));

	return 0;
}
/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
				  struct map *map)
{
	struct process_kallsyms_args args = { .map = map, .dso = dso, };

	return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
}
/*
 * Redistribute the kallsyms symbols of @dso over the kcore maps: every
 * symbol is moved to the map covering its address and rebased to that
 * map's file offset.  Symbols falling outside all maps are dropped.
 * Returns the number of symbols kept, or -1 without kmaps.
 */
static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map)
{
	struct map_groups *kmaps = map__kmaps(map);
	struct map *curr_map;
	struct symbol *pos;
	int count = 0;
	/* Take ownership of the old tree and rebuild dso->symbols from scratch. */
	struct rb_root old_root = dso->symbols[map->type];
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);

	if (!kmaps)
		return -1;

	*root = RB_ROOT;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		rb_erase_init(&pos->rb_node, &old_root);

		/* Strip the "\tmodule_name" suffix kallsyms appends. */
		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, map->type, pos->start);

		if (!curr_map) {
			/* No map covers this address: drop the symbol. */
			symbol__delete(pos);
			continue;
		}

		/* Rebase from absolute address to the map's file offset. */
		pos->start -= curr_map->start - curr_map->pgoff;
		if (pos->end)
			pos->end -= curr_map->start - curr_map->pgoff;
		symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
		++count;
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count;
}
/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names vmlinux have.
 */
static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
{
	struct map_groups *kmaps = map__kmaps(map);
	struct machine *machine;
	struct map *curr_map = map;
	struct symbol *pos;
	int count = 0, moved = 0;	/* count: kept in @map; moved: to other maps */
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);
	int kernel_range = 0;		/* suffix for the synthesized [kernel].N names */

	if (!kmaps)
		return -1;

	machine = kmaps->machine;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		/* kallsyms tags module symbols with a "\tmodule_name" suffix. */
		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			/* Switch curr_map when crossing into a different module. */
			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module are
					 * continuous in * kallsyms, so curr_map
					 * points to a module and all its
					 * symbols are in its kmap. Mark it as
					 * loaded.
					 */
					dso__set_loaded(curr_map->dso,
							curr_map->type);
				}

				curr_map = map_groups__find_by_name(kmaps,
							map->type, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end   = curr_map->map_ip(curr_map, pos->end);
		} else if (curr_map != map) {
			/* Back in kernel text after a module: open a [kernel].N map. */
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				curr_map = map;
				goto add_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					"[guest.kernel].%d",
					kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					"[kernel].%d",
					kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso, map->type);
			if (curr_map == NULL) {
				dso__put(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			/*
			 * NOTE(review): kernel_range was already incremented by
			 * the snprintf() above, so the [kernel].N suffixes skip
			 * every other number.  Presumably harmless since they
			 * only need to be unique — confirm against upstream.
			 */
			++kernel_range;
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
add_symbol:
		if (curr_map != map) {
			/* Moving to another map: re-insert in that dso's tree. */
			rb_erase(&pos->rb_node, root);
			symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
			++moved;
		} else
			++count;

		continue;
discard_symbol:
		rb_erase(&pos->rb_node, root);
		symbol__delete(pos);
	}

	if (curr_map != map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso, curr_map->type);
	}

	return count + moved;
}
  726. bool symbol__restricted_filename(const char *filename,
  727. const char *restricted_filename)
  728. {
  729. bool restricted = false;
  730. if (symbol_conf.kptr_restrict) {
  731. char *r = realpath(filename, NULL);
  732. if (r != NULL) {
  733. restricted = strcmp(r, restricted_filename) == 0;
  734. free(r);
  735. return restricted;
  736. }
  737. }
  738. return restricted;
  739. }
/* One module parsed from /proc/modules, kept in an rb tree keyed by name. */
struct module_info {
	struct rb_node rb_node;
	char *name;	/* strdup'ed; owned by this entry, freed in delete_modules() */
	u64 start;	/* module load address */
};
  745. static void add_module(struct module_info *mi, struct rb_root *modules)
  746. {
  747. struct rb_node **p = &modules->rb_node;
  748. struct rb_node *parent = NULL;
  749. struct module_info *m;
  750. while (*p != NULL) {
  751. parent = *p;
  752. m = rb_entry(parent, struct module_info, rb_node);
  753. if (strcmp(mi->name, m->name) < 0)
  754. p = &(*p)->rb_left;
  755. else
  756. p = &(*p)->rb_right;
  757. }
  758. rb_link_node(&mi->rb_node, parent, p);
  759. rb_insert_color(&mi->rb_node, modules);
  760. }
  761. static void delete_modules(struct rb_root *modules)
  762. {
  763. struct module_info *mi;
  764. struct rb_node *next = rb_first(modules);
  765. while (next) {
  766. mi = rb_entry(next, struct module_info, rb_node);
  767. next = rb_next(&mi->rb_node);
  768. rb_erase(&mi->rb_node, modules);
  769. zfree(&mi->name);
  770. free(mi);
  771. }
  772. }
  773. static struct module_info *find_module(const char *name,
  774. struct rb_root *modules)
  775. {
  776. struct rb_node *n = modules->rb_node;
  777. while (n) {
  778. struct module_info *m;
  779. int cmp;
  780. m = rb_entry(n, struct module_info, rb_node);
  781. cmp = strcmp(name, m->name);
  782. if (cmp < 0)
  783. n = n->rb_left;
  784. else if (cmp > 0)
  785. n = n->rb_right;
  786. else
  787. return m;
  788. }
  789. return NULL;
  790. }
  791. static int __read_proc_modules(void *arg, const char *name, u64 start,
  792. u64 size __maybe_unused)
  793. {
  794. struct rb_root *modules = arg;
  795. struct module_info *mi;
  796. mi = zalloc(sizeof(struct module_info));
  797. if (!mi)
  798. return -ENOMEM;
  799. mi->name = strdup(name);
  800. mi->start = start;
  801. if (!mi->name) {
  802. free(mi);
  803. return -ENOMEM;
  804. }
  805. add_module(mi, modules);
  806. return 0;
  807. }
  808. static int read_proc_modules(const char *filename, struct rb_root *modules)
  809. {
  810. if (symbol__restricted_filename(filename, "/proc/modules"))
  811. return -1;
  812. if (modules__parse(filename, modules, __read_proc_modules)) {
  813. delete_modules(modules);
  814. return -1;
  815. }
  816. return 0;
  817. }
  818. int compare_proc_modules(const char *from, const char *to)
  819. {
  820. struct rb_root from_modules = RB_ROOT;
  821. struct rb_root to_modules = RB_ROOT;
  822. struct rb_node *from_node, *to_node;
  823. struct module_info *from_m, *to_m;
  824. int ret = -1;
  825. if (read_proc_modules(from, &from_modules))
  826. return -1;
  827. if (read_proc_modules(to, &to_modules))
  828. goto out_delete_from;
  829. from_node = rb_first(&from_modules);
  830. to_node = rb_first(&to_modules);
  831. while (from_node) {
  832. if (!to_node)
  833. break;
  834. from_m = rb_entry(from_node, struct module_info, rb_node);
  835. to_m = rb_entry(to_node, struct module_info, rb_node);
  836. if (from_m->start != to_m->start ||
  837. strcmp(from_m->name, to_m->name))
  838. break;
  839. from_node = rb_next(from_node);
  840. to_node = rb_next(to_node);
  841. }
  842. if (!from_node && !to_node)
  843. ret = 0;
  844. delete_modules(&to_modules);
  845. out_delete_from:
  846. delete_modules(&from_modules);
  847. return ret;
  848. }
  849. static int do_validate_kcore_modules(const char *filename, struct map *map,
  850. struct map_groups *kmaps)
  851. {
  852. struct rb_root modules = RB_ROOT;
  853. struct map *old_map;
  854. int err;
  855. err = read_proc_modules(filename, &modules);
  856. if (err)
  857. return err;
  858. old_map = map_groups__first(kmaps, map->type);
  859. while (old_map) {
  860. struct map *next = map_groups__next(old_map);
  861. struct module_info *mi;
  862. if (old_map == map || old_map->start == map->start) {
  863. /* The kernel map */
  864. old_map = next;
  865. continue;
  866. }
  867. /* Module must be in memory at the same address */
  868. mi = find_module(old_map->dso->short_name, &modules);
  869. if (!mi || mi->start != old_map->start) {
  870. err = -EINVAL;
  871. goto out;
  872. }
  873. old_map = next;
  874. }
  875. out:
  876. delete_modules(&modules);
  877. return err;
  878. }
  879. /*
  880. * If kallsyms is referenced by name then we look for filename in the same
  881. * directory.
  882. */
  883. static bool filename_from_kallsyms_filename(char *filename,
  884. const char *base_name,
  885. const char *kallsyms_filename)
  886. {
  887. char *name;
  888. strcpy(filename, kallsyms_filename);
  889. name = strrchr(filename, '/');
  890. if (!name)
  891. return false;
  892. name += 1;
  893. if (!strcmp(name, "kallsyms")) {
  894. strcpy(name, base_name);
  895. return true;
  896. }
  897. return false;
  898. }
  899. static int validate_kcore_modules(const char *kallsyms_filename,
  900. struct map *map)
  901. {
  902. struct map_groups *kmaps = map__kmaps(map);
  903. char modules_filename[PATH_MAX];
  904. if (!kmaps)
  905. return -EINVAL;
  906. if (!filename_from_kallsyms_filename(modules_filename, "modules",
  907. kallsyms_filename))
  908. return -EINVAL;
  909. if (do_validate_kcore_modules(modules_filename, map, kmaps))
  910. return -EINVAL;
  911. return 0;
  912. }
  913. static int validate_kcore_addresses(const char *kallsyms_filename,
  914. struct map *map)
  915. {
  916. struct kmap *kmap = map__kmap(map);
  917. if (!kmap)
  918. return -EINVAL;
  919. if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
  920. u64 start;
  921. if (kallsyms__get_function_start(kallsyms_filename,
  922. kmap->ref_reloc_sym->name, &start))
  923. return -ENOENT;
  924. if (start != kmap->ref_reloc_sym->addr)
  925. return -EINVAL;
  926. }
  927. return validate_kcore_modules(kallsyms_filename, map);
  928. }
/* Context passed to kcore_mapfn() while parsing kcore program headers. */
struct kcore_mapfn_data {
	struct dso *dso;	/* dso the new maps will belong to */
	enum map_type type;	/* map type being loaded */
	struct list_head maps;	/* temporary list of maps built from kcore */
};
  934. static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
  935. {
  936. struct kcore_mapfn_data *md = data;
  937. struct map *map;
  938. map = map__new2(start, md->dso, md->type);
  939. if (map == NULL)
  940. return -ENOMEM;
  941. map->end = map->start + len;
  942. map->pgoff = pgoff;
  943. list_add(&map->node, &md->maps);
  944. return 0;
  945. }
/*
 * Replace the kallsyms-derived kernel maps with maps built from a kcore file
 * found next to @kallsyms_filename, so that kernel object code can later be
 * read via dso__data_read_addr().  Requires @map to be the machine's kernel
 * map and the kcore contents to match the recorded kernel addresses.
 * Returns 0 on success, -EINVAL on any failure (leaving the old maps intact
 * if the failure happens before removal).
 */
static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct map_groups *kmaps = map__kmaps(map);
	struct machine *machine;
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	struct symbol *sym;

	if (!kmaps)
		return -EINVAL;

	machine = kmaps->machine;

	/* This function requires that the map is the kernel map */
	if (map != machine->vmlinux_maps[map->type])
		return -EINVAL;

	/* Look for a "kcore" file in the same directory as kallsyms. */
	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	md.type = map->type;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
			 kcore_filename);
		return -EINVAL;
	}

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;
	dso->is_64_bit = is_64_bit;

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps (all but the kernel map itself). */
	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		struct map *next = map_groups__next(old_map);

		if (old_map != map)
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}

	/* Find the kernel map using the first symbol */
	sym = dso__first_symbol(dso, map->type);
	list_for_each_entry(new_map, &md.maps, node) {
		if (sym && sym->start >= new_map->start &&
		    sym->start < new_map->end) {
			replacement_map = new_map;
			break;
		}
	}

	/* No symbol fell inside any map: fall back to the first map. */
	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
		list_del_init(&new_map->node);
		if (new_map == replacement_map) {
			/*
			 * The existing kernel map object is kept but takes on
			 * the kcore map's addresses and ip conversion.
			 */
			map->start = new_map->start;
			map->end = new_map->end;
			map->pgoff = new_map->pgoff;
			map->map_ip = new_map->map_ip;
			map->unmap_ip = new_map->unmap_ip;
			/* Ensure maps are correctly ordered */
			map__get(map);
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
			map__put(map);
		} else {
			map_groups__insert(kmaps, new_map);
		}
		/* Drop the temporary list's reference. */
		map__put(new_map);
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
	else
		dso->binary_type = DSO_BINARY_TYPE__KCORE;
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map->type == MAP__FUNCTION)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	/* Dispose of any maps accumulated before the failure. */
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
		list_del_init(&map->node);
		map__put(map);
	}
	close(fd);
	return -EINVAL;
}
  1051. /*
  1052. * If the kernel is relocated at boot time, kallsyms won't match. Compute the
  1053. * delta based on the relocation reference symbol.
  1054. */
  1055. static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
  1056. {
  1057. struct kmap *kmap = map__kmap(map);
  1058. u64 addr;
  1059. if (!kmap)
  1060. return -1;
  1061. if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
  1062. return 0;
  1063. if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
  1064. return -1;
  1065. *delta = addr - kmap->ref_reloc_sym->addr;
  1066. return 0;
  1067. }
  1068. int __dso__load_kallsyms(struct dso *dso, const char *filename,
  1069. struct map *map, bool no_kcore)
  1070. {
  1071. u64 delta = 0;
  1072. if (symbol__restricted_filename(filename, "/proc/kallsyms"))
  1073. return -1;
  1074. if (dso__load_all_kallsyms(dso, filename, map) < 0)
  1075. return -1;
  1076. if (kallsyms__delta(map, filename, &delta))
  1077. return -1;
  1078. symbols__fixup_end(&dso->symbols[map->type]);
  1079. symbols__fixup_duplicate(&dso->symbols[map->type]);
  1080. if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
  1081. dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
  1082. else
  1083. dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
  1084. if (!no_kcore && !dso__load_kcore(dso, map, filename))
  1085. return dso__split_kallsyms_for_kcore(dso, map);
  1086. else
  1087. return dso__split_kallsyms(dso, map, delta);
  1088. }
/* Convenience wrapper: load kallsyms, also trying kcore (no_kcore == false). */
int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map)
{
	return __dso__load_kallsyms(dso, filename, map, false);
}
  1094. static int dso__load_perf_map(const char *map_path, struct dso *dso,
  1095. struct map *map)
  1096. {
  1097. char *line = NULL;
  1098. size_t n;
  1099. FILE *file;
  1100. int nr_syms = 0;
  1101. file = fopen(map_path, "r");
  1102. if (file == NULL)
  1103. goto out_failure;
  1104. while (!feof(file)) {
  1105. u64 start, size;
  1106. struct symbol *sym;
  1107. int line_len, len;
  1108. line_len = getline(&line, &n, file);
  1109. if (line_len < 0)
  1110. break;
  1111. if (!line)
  1112. goto out_failure;
  1113. line[--line_len] = '\0'; /* \n */
  1114. len = hex2u64(line, &start);
  1115. len++;
  1116. if (len + 2 >= line_len)
  1117. continue;
  1118. len += hex2u64(line + len, &size);
  1119. len++;
  1120. if (len + 2 >= line_len)
  1121. continue;
  1122. sym = symbol__new(start, size, STB_GLOBAL, line + len);
  1123. if (sym == NULL)
  1124. goto out_delete_line;
  1125. symbols__insert(&dso->symbols[map->type], sym);
  1126. nr_syms++;
  1127. }
  1128. free(line);
  1129. fclose(file);
  1130. return nr_syms;
  1131. out_delete_line:
  1132. free(line);
  1133. out_failure:
  1134. return -1;
  1135. }
/*
 * Decide whether a symtab of @type could possibly apply to @dso, given
 * whether the dso is a kernel module (@kmod). Used to skip candidate
 * symtab locations that cannot match.
 */
static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	/* Userspace binaries and their debuginfo variants. */
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
		return !kmod && dso->kernel == DSO_TYPE_USER;

	/* Host kernel images. */
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso->kernel == DSO_TYPE_KERNEL;

	/* Guest kernel images. */
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso->kernel == DSO_TYPE_GUEST_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		/*
		 * kernel modules know their symtab type - it's set when
		 * creating a module dso in machine__findnew_module_map().
		 */
		return kmod && dso->symtab_type == type;

	/* The build-id cache can hold any kind of dso. */
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		return true;

	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return false;
	}
}
  1173. /* Checks for the existence of the perf-<pid>.map file in two different
  1174. * locations. First, if the process is a separate mount namespace, check in
  1175. * that namespace using the pid of the innermost pid namespace. If's not in a
  1176. * namespace, or the file can't be found there, try in the mount namespace of
  1177. * the tracing process using our view of its pid.
  1178. */
  1179. static int dso__find_perf_map(char *filebuf, size_t bufsz,
  1180. struct nsinfo **nsip)
  1181. {
  1182. struct nscookie nsc;
  1183. struct nsinfo *nsi;
  1184. struct nsinfo *nnsi;
  1185. int rc = -1;
  1186. nsi = *nsip;
  1187. if (nsi->need_setns) {
  1188. snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
  1189. nsinfo__mountns_enter(nsi, &nsc);
  1190. rc = access(filebuf, R_OK);
  1191. nsinfo__mountns_exit(&nsc);
  1192. if (rc == 0)
  1193. return rc;
  1194. }
  1195. nnsi = nsinfo__copy(nsi);
  1196. if (nnsi) {
  1197. nsinfo__put(nsi);
  1198. nnsi->need_setns = false;
  1199. snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
  1200. *nsip = nnsi;
  1201. rc = 0;
  1202. }
  1203. return rc;
  1204. }
/*
 * Load symbols for @dso into @map. Dispatches, in order of specificity, to
 * the kernel/guest-kernel loaders, the perf map-file parser, or the generic
 * search over candidate symtab locations. Returns the number of symbols
 * loaded, 1 if the dso was already loaded, or a negative value on error.
 */
int dso__load(struct dso *dso, struct map *map)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];	/* at most one symtab + one runtime symsrc */
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;
	bool perfmap;
	unsigned char build_id[BUILD_ID_SIZE];
	struct nscookie nsc;
	char newmapname[PATH_MAX];
	const char *map_path = dso->long_name;

	/* perf map files are named /tmp/perf-<pid>.map */
	perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
	if (perfmap) {
		/* Prefer the map file inside the process's mount namespace. */
		if (dso->nsinfo && (dso__find_perf_map(newmapname,
		    sizeof(newmapname), &dso->nsinfo) == 0)) {
			map_path = newmapname;
		}
	}

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	pthread_mutex_lock(&dso->lock);

	/* check again under the dso->lock */
	if (dso__loaded(dso, map->type)) {
		ret = 1;
		goto out;
	}

	/* Kernel dsos have dedicated loaders. */
	if (dso->kernel) {
		if (dso->kernel == DSO_TYPE_KERNEL)
			ret = dso__load_kernel_sym(dso, map);
		else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			ret = dso__load_guest_kernel_sym(dso, map);
		goto out;
	}

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	dso->adjust_symbols = 0;

	if (perfmap) {
		struct stat st;

		if (lstat(map_path, &st) < 0)
			goto out;

		/* Refuse other users' map files unless forced (-f). */
		if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
			pr_warning("File %s not owned by current user or root, "
				   "ignoring it (use -f to override).\n", map_path);
			goto out;
		}

		ret = dso__load_perf_map(map_path, dso, map);
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
		goto out;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		goto out;

	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;

	/*
	 * Read the build id if possible. This is required for
	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
	 */
	if (!dso->has_build_id &&
	    is_regular_file(dso->long_name)) {
		__symbol__join_symfs(name, PATH_MAX, dso->long_name);
		if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
			dso__set_build_id(dso, build_id);
	}

	/*
	 * Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;
		bool is_reg;
		bool nsexit;
		int sirc = -1;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		/* Build-id cache lookups happen in our own mount namespace. */
		nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
		    symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);

		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		if (nsexit)
			nsinfo__mountns_exit(&nsc);

		is_reg = is_regular_file(name);
		if (is_reg)
			sirc = symsrc__init(ss, dso, name, symtab_type);

		if (nsexit)
			nsinfo__mountns_enter(dso->nsinfo, &nsc);

		if (!is_reg || sirc < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso->symsrc_filename)
				dso->symsrc_filename = strdup(name);
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			/* Both roles filled: stop searching. */
			if (syms_ss && runtime_ss)
				break;
		} else {
			symsrc__destroy(ss);
		}

	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	if (syms_ss)
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
	else
		ret = -1;

	if (ret > 0) {
		int nr_plt;

		/* Also synthesize symbols for PLT entries. */
		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	/* Deleted files have no symbols; treat as empty, not as an error. */
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
		ret = 0;
out:
	dso__set_loaded(dso, map->type);
	pthread_mutex_unlock(&dso->lock);
	nsinfo__mountns_exit(&nsc);

	return ret;
}
  1356. struct map *map_groups__find_by_name(struct map_groups *mg,
  1357. enum map_type type, const char *name)
  1358. {
  1359. struct maps *maps = &mg->maps[type];
  1360. struct map *map;
  1361. down_read(&maps->lock);
  1362. for (map = maps__first(maps); map; map = map__next(map)) {
  1363. if (map->dso && strcmp(map->dso->short_name, name) == 0)
  1364. goto out_unlock;
  1365. }
  1366. map = NULL;
  1367. out_unlock:
  1368. up_read(&maps->lock);
  1369. return map;
  1370. }
  1371. int dso__load_vmlinux(struct dso *dso, struct map *map,
  1372. const char *vmlinux, bool vmlinux_allocated)
  1373. {
  1374. int err = -1;
  1375. struct symsrc ss;
  1376. char symfs_vmlinux[PATH_MAX];
  1377. enum dso_binary_type symtab_type;
  1378. if (vmlinux[0] == '/')
  1379. snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
  1380. else
  1381. symbol__join_symfs(symfs_vmlinux, vmlinux);
  1382. if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
  1383. symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
  1384. else
  1385. symtab_type = DSO_BINARY_TYPE__VMLINUX;
  1386. if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
  1387. return -1;
  1388. err = dso__load_sym(dso, map, &ss, &ss, 0);
  1389. symsrc__destroy(&ss);
  1390. if (err > 0) {
  1391. if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
  1392. dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
  1393. else
  1394. dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
  1395. dso__set_long_name(dso, vmlinux, vmlinux_allocated);
  1396. dso__set_loaded(dso, map->type);
  1397. pr_debug("Using %s for symbols\n", symfs_vmlinux);
  1398. }
  1399. return err;
  1400. }
  1401. int dso__load_vmlinux_path(struct dso *dso, struct map *map)
  1402. {
  1403. int i, err = 0;
  1404. char *filename = NULL;
  1405. pr_debug("Looking at the vmlinux_path (%d entries long)\n",
  1406. vmlinux_path__nr_entries + 1);
  1407. for (i = 0; i < vmlinux_path__nr_entries; ++i) {
  1408. err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
  1409. if (err > 0)
  1410. goto out;
  1411. }
  1412. if (!symbol_conf.ignore_vmlinux_buildid)
  1413. filename = dso__build_id_filename(dso, NULL, 0, false);
  1414. if (filename != NULL) {
  1415. err = dso__load_vmlinux(dso, map, filename, true);
  1416. if (err > 0)
  1417. goto out;
  1418. free(filename);
  1419. }
  1420. out:
  1421. return err;
  1422. }
  1423. static bool visible_dir_filter(const char *name, struct dirent *d)
  1424. {
  1425. if (d->d_type != DT_DIR)
  1426. return false;
  1427. return lsdir_no_dot_filter(name, d);
  1428. }
  1429. static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
  1430. {
  1431. char kallsyms_filename[PATH_MAX];
  1432. int ret = -1;
  1433. struct strlist *dirs;
  1434. struct str_node *nd;
  1435. dirs = lsdir(dir, visible_dir_filter);
  1436. if (!dirs)
  1437. return -1;
  1438. strlist__for_each_entry(nd, dirs) {
  1439. scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
  1440. "%s/%s/kallsyms", dir, nd->s);
  1441. if (!validate_kcore_addresses(kallsyms_filename, map)) {
  1442. strlcpy(dir, kallsyms_filename, dir_sz);
  1443. ret = 0;
  1444. break;
  1445. }
  1446. }
  1447. strlist__delete(dirs);
  1448. return ret;
  1449. }
  1450. /*
  1451. * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
  1452. * since access(R_OK) only checks with real UID/GID but open() use effective
  1453. * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
  1454. */
  1455. static bool filename__readable(const char *file)
  1456. {
  1457. int fd = open(file, O_RDONLY);
  1458. if (fd < 0)
  1459. return false;
  1460. close(fd);
  1461. return true;
  1462. }
/*
 * Pick the best kallsyms source for @dso: the running kernel's
 * /proc/kallsyms when the build-ids match (fast path), otherwise a kcore or
 * kallsyms copy from the build-id cache. Returns a strdup()ed path owned by
 * the caller, or NULL when nothing usable is found.
 */
static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	u8 host_build_id[BUILD_ID_SIZE];
	char sbuild_id[SBUILD_ID_SIZE];
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso->has_build_id) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	/* Does the dso's build-id match the running kernel's? */
	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
				 sizeof(host_build_id)) == 0)
		is_host = dso__build_id_equal(dso, host_build_id);

	/* Try a fast path for /proc/kallsyms if possible */
	if (is_host) {
		/*
		 * Do not check the build-id cache, unless we know we cannot use
		 * /proc/kcore or module maps don't match to /proc/kallsyms.
		 * To check readability of /proc/kcore, do not use access(R_OK)
		 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
		 * can't check it.
		 */
		if (filename__readable("/proc/kcore") &&
		    !validate_kcore_addresses("/proc/kallsyms", map))
			goto proc_kallsyms;
	}

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

	/* Find kallsyms in build-id cache with kcore */
	scnprintf(path, sizeof(path), "%s/%s/%s",
		  buildid_dir, DSO__NAME_KCORE, sbuild_id);

	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

	/* Use current /proc/kallsyms if possible */
	if (is_host) {
proc_kallsyms:
		return strdup("/proc/kallsyms");
	}

	/* Finally, find a cache of kallsyms */
	if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);
}
/*
 * Resolve symbols for the host kernel dso: user-specified kallsyms/vmlinux
 * first, then the vmlinux search path, then an automatically chosen kallsyms
 * source. Returns the number of symbols loaded, or a negative error.
 */
static int dso__load_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fallback to another vmlinux (a
	 * x86_86 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
	}

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map);
		if (err > 0)
			return err;
	}

	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	/* kcore already fixed the map; otherwise refit it to the symbols. */
	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}
  1563. static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
  1564. {
  1565. int err;
  1566. const char *kallsyms_filename = NULL;
  1567. struct machine *machine;
  1568. char path[PATH_MAX];
  1569. if (!map->groups) {
  1570. pr_debug("Guest kernel map hasn't the point to groups\n");
  1571. return -1;
  1572. }
  1573. machine = map->groups->machine;
  1574. if (machine__is_default_guest(machine)) {
  1575. /*
  1576. * if the user specified a vmlinux filename, use it and only
  1577. * it, reporting errors to the user if it cannot be used.
  1578. * Or use file guest_kallsyms inputted by user on commandline
  1579. */
  1580. if (symbol_conf.default_guest_vmlinux_name != NULL) {
  1581. err = dso__load_vmlinux(dso, map,
  1582. symbol_conf.default_guest_vmlinux_name,
  1583. false);
  1584. return err;
  1585. }
  1586. kallsyms_filename = symbol_conf.default_guest_kallsyms;
  1587. if (!kallsyms_filename)
  1588. return -1;
  1589. } else {
  1590. sprintf(path, "%s/proc/kallsyms", machine->root_dir);
  1591. kallsyms_filename = path;
  1592. }
  1593. err = dso__load_kallsyms(dso, kallsyms_filename, map);
  1594. if (err > 0)
  1595. pr_debug("Using %s for symbols\n", kallsyms_filename);
  1596. if (err > 0 && !dso__is_kcore(dso)) {
  1597. dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
  1598. dso__set_long_name(dso, machine->mmap_name, false);
  1599. map__fixup_start(map);
  1600. map__fixup_end(map);
  1601. }
  1602. return err;
  1603. }
  1604. static void vmlinux_path__exit(void)
  1605. {
  1606. while (--vmlinux_path__nr_entries >= 0)
  1607. zfree(&vmlinux_path[vmlinux_path__nr_entries]);
  1608. vmlinux_path__nr_entries = 0;
  1609. zfree(&vmlinux_path);
  1610. }
/* vmlinux locations tried unconditionally (no kernel version needed). */
static const char * const vmlinux_paths[] = {
	"vmlinux",
	"/boot/vmlinux"
};
/* vmlinux location templates; "%s" is replaced by the kernel version. */
static const char * const vmlinux_paths_upd[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
	"/lib/modules/%s/build/vmlinux",
	"/usr/lib/debug/lib/modules/%s/vmlinux",
	"/usr/lib/debug/boot/vmlinux-%s.debug"
};
  1622. static int vmlinux_path__add(const char *new_entry)
  1623. {
  1624. vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
  1625. if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
  1626. return -1;
  1627. ++vmlinux_path__nr_entries;
  1628. return 0;
  1629. }
  1630. static int vmlinux_path__init(struct perf_env *env)
  1631. {
  1632. struct utsname uts;
  1633. char bf[PATH_MAX];
  1634. char *kernel_version;
  1635. unsigned int i;
  1636. vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
  1637. ARRAY_SIZE(vmlinux_paths_upd)));
  1638. if (vmlinux_path == NULL)
  1639. return -1;
  1640. for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
  1641. if (vmlinux_path__add(vmlinux_paths[i]) < 0)
  1642. goto out_fail;
  1643. /* only try kernel version if no symfs was given */
  1644. if (symbol_conf.symfs[0] != 0)
  1645. return 0;
  1646. if (env) {
  1647. kernel_version = env->os_release;
  1648. } else {
  1649. if (uname(&uts) < 0)
  1650. goto out_fail;
  1651. kernel_version = uts.release;
  1652. }
  1653. for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
  1654. snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
  1655. if (vmlinux_path__add(bf) < 0)
  1656. goto out_fail;
  1657. }
  1658. return 0;
  1659. out_fail:
  1660. vmlinux_path__exit();
  1661. return -1;
  1662. }
  1663. int setup_list(struct strlist **list, const char *list_str,
  1664. const char *list_name)
  1665. {
  1666. if (list_str == NULL)
  1667. return 0;
  1668. *list = strlist__new(list_str, NULL);
  1669. if (!*list) {
  1670. pr_err("problems parsing %s list\n", list_name);
  1671. return -1;
  1672. }
  1673. symbol_conf.has_filter = true;
  1674. return 0;
  1675. }
  1676. int setup_intlist(struct intlist **list, const char *list_str,
  1677. const char *list_name)
  1678. {
  1679. if (list_str == NULL)
  1680. return 0;
  1681. *list = intlist__new(list_str);
  1682. if (!*list) {
  1683. pr_err("problems parsing %s list\n", list_name);
  1684. return -1;
  1685. }
  1686. return 0;
  1687. }
  1688. static bool symbol__read_kptr_restrict(void)
  1689. {
  1690. bool value = false;
  1691. FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
  1692. if (fp != NULL) {
  1693. char line[8];
  1694. if (fgets(line, sizeof(line), fp) != NULL)
  1695. value = ((geteuid() != 0) || (getuid() != 0)) ?
  1696. (atoi(line) != 0) :
  1697. (atoi(line) == 2);
  1698. fclose(fp);
  1699. }
  1700. return value;
  1701. }
  1702. int symbol__annotation_init(void)
  1703. {
  1704. if (symbol_conf.init_annotation)
  1705. return 0;
  1706. if (symbol_conf.initialized) {
  1707. pr_err("Annotation needs to be init before symbol__init()\n");
  1708. return -1;
  1709. }
  1710. symbol_conf.priv_size += sizeof(struct annotation);
  1711. symbol_conf.init_annotation = true;
  1712. return 0;
  1713. }
/*
 * One-time initialization of the symbol subsystem: aligns the
 * per-symbol private area, initializes the ELF layer, builds the
 * vmlinux search path, parses the dso/comm/pid/tid/symbol filter
 * lists from symbol_conf, and normalizes the symfs prefix.
 * Idempotent: returns 0 immediately if already initialized.
 * Returns 0 on success, -1 on failure; lists built before the failure
 * point are torn down through the goto chain at the bottom.
 */
int symbol__init(struct perf_env *env)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	/* Keep each symbol's private area u64-aligned. */
	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	/*
	 * Sorting by name embeds a symbol_name_rb_node (which itself
	 * contains a struct symbol) in the private area, so only the
	 * difference needs to be added on top.
	 */
	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
		return -1;

	/* '.' would collide with the "any character" in sort specs. */
	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only non valid --field-separator argument\n");
		return -1;
	}

	/* Parse each user-supplied filter; unwind earlier ones on failure. */
	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_intlist(&symbol_conf.pid_list,
			  symbol_conf.pid_list_str, "pid") < 0)
		goto out_free_comm_list;

	if (setup_intlist(&symbol_conf.tid_list,
			  symbol_conf.tid_list_str, "tid") < 0)
		goto out_free_pid_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_tid_list;

	if (setup_list(&symbol_conf.bt_stop_list,
		       symbol_conf.bt_stop_list_str, "symbol") < 0)
		goto out_free_sym_list;

	/*
	 * A path to symbols of "/" is identical to ""
	 * reset here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs; /* realpath failed: fall back to the raw value */
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	/* free only if realpath actually handed us a new allocation */
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_sym_list:
	strlist__delete(symbol_conf.sym_list);
out_free_tid_list:
	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
	intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}
  1774. void symbol__exit(void)
  1775. {
  1776. if (!symbol_conf.initialized)
  1777. return;
  1778. strlist__delete(symbol_conf.bt_stop_list);
  1779. strlist__delete(symbol_conf.sym_list);
  1780. strlist__delete(symbol_conf.dso_list);
  1781. strlist__delete(symbol_conf.comm_list);
  1782. intlist__delete(symbol_conf.tid_list);
  1783. intlist__delete(symbol_conf.pid_list);
  1784. vmlinux_path__exit();
  1785. symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
  1786. symbol_conf.bt_stop_list = NULL;
  1787. symbol_conf.initialized = false;
  1788. }
  1789. int symbol__config_symfs(const struct option *opt __maybe_unused,
  1790. const char *dir, int unset __maybe_unused)
  1791. {
  1792. char *bf = NULL;
  1793. int ret;
  1794. symbol_conf.symfs = strdup(dir);
  1795. if (symbol_conf.symfs == NULL)
  1796. return -ENOMEM;
  1797. /* skip the locally configured cache if a symfs is given, and
  1798. * config buildid dir to symfs/.debug
  1799. */
  1800. ret = asprintf(&bf, "%s/%s", dir, ".debug");
  1801. if (ret < 0)
  1802. return -ENOMEM;
  1803. set_buildid_dir(bf);
  1804. free(bf);
  1805. return 0;
  1806. }
  1807. struct mem_info *mem_info__get(struct mem_info *mi)
  1808. {
  1809. if (mi)
  1810. refcount_inc(&mi->refcnt);
  1811. return mi;
  1812. }
  1813. void mem_info__put(struct mem_info *mi)
  1814. {
  1815. if (mi && refcount_dec_and_test(&mi->refcnt))
  1816. free(mi);
  1817. }
  1818. struct mem_info *mem_info__new(void)
  1819. {
  1820. struct mem_info *mi = zalloc(sizeof(*mi));
  1821. if (mi)
  1822. refcount_set(&mi->refcnt, 1);
  1823. return mi;
  1824. }