map.c

#include "symbol.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "map.h"
#include "thread.h"
#include "strlist.h"
#include "vdso.h"
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include <linux/string.h>
#include "unwind.h"

static void __maps__insert(struct maps *maps, struct map *map);

const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
	[MAP__VARIABLE] = "Variables",
};

static inline int is_anon_memory(const char *filename)
{
	return !strcmp(filename, "//anon") ||
	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", 6) ||
	       !strncmp(filename, "/SYSV", 5)  ||
	       !strcmp(filename, "[heap]");
}

static inline int is_android_lib(const char *filename)
{
	return !strncmp(filename, "/data/app-lib", 13) ||
	       !strncmp(filename, "/system/lib", 11);
}
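
/*
 * Map Android app/system library paths to their host-side counterparts,
 * using the APP_ABI, APK_PATH, NDK_ROOT and APP_PLATFORM environment
 * variables, so that the symbols can be found on the machine running perf.
 */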
static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (!strncmp(filename, "/data/app-lib", 13)) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	if (!strncmp(filename, "/system/lib/", 11)) {
		char *ndk, *app;
		const char *arch;
		size_t ndk_length;
		size_t app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

		if (!arch)
			return false;

		new_length = 27 + ndk_length +
			     app_length + lib_length
			   + strlen(arch);

		if (new_length > PATH_MAX)
			return false;
		snprintf(newfilename, new_length,
			 "%s/platforms/%s/arch-%s/usr/lib/%s",
			 ndk, app, arch, libname);

		return true;
	}
	return false;
}

void map__init(struct map *map, enum map_type type,
	       u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map->type     = type;
	map->start    = start;
	map->end      = end;
	map->pgoff    = pgoff;
	map->reloc    = 0;
	map->dso      = dso__get(dso);
	map->map_ip   = map__map_ip;
	map->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&map->rb_node);
	map->groups   = NULL;
	map->erange_warned = false;
	atomic_set(&map->refcnt, 1);
}
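
/*
 * Constructor for maps seen in mmap events: classify the backing file
 * (anonymous memory, vdso, Android library, plain DSO), find or create
 * the corresponding struct dso and initialize the map with it.
 */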
struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
		     u64 ino_gen, u32 prot, u32 flags, char *filename,
		     enum map_type type, struct thread *thread)
{
	struct map *map = malloc(sizeof(*map));

	if (map != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		map->maj = d_maj;
		map->min = d_min;
		map->ino = ino;
		map->ino_generation = ino_gen;
		map->prot = prot;
		map->flags = flags;

		if ((anon || no_dso) && type == MAP__FUNCTION) {
			snprintf(newfilename, sizeof(newfilename),
				 "/tmp/perf-%d.map", pid);
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			pgoff = 0;
			dso = machine__findnew_vdso(machine, thread);
		} else
			dso = machine__findnew_dso(machine, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(map, type, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->map_ip = map->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (type != MAP__FUNCTION)
				dso__set_loaded(dso, map->type);
		}
		dso__put(dso);
	}
	return map;
out_delete:
	free(map);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
{
	struct map *map = calloc(1, (sizeof(*map) +
				     (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, type, start, 0, 0, dso);
	}

	return map;
}

/*
 * Use this and __map__is_kmodule() for map instances that are in
 * machine->kmaps, and thus have map->groups->machine all properly set, to
 * disambiguate between the kernel and modules.
 *
 * When the need arises, introduce map__is_{kernel,kmodule}() that
 * checks (map->groups != NULL && map->groups->machine != NULL &&
 * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
 */
bool __map__is_kernel(const struct map *map)
{
	return __machine__kernel_map(map->groups->machine, map->type) == map;
}

static void map__exit(struct map *map)
{
	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
	dso__zput(map->dso);
}

void map__delete(struct map *map)
{
	map__exit(map);
	free(map);
}

void map__put(struct map *map)
{
	if (map && atomic_dec_and_test(&map->refcnt))
		map__delete(map);
}
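
/*
 * Set the map boundaries from the first and last symbol in the DSO's
 * symbol rb-tree, once the symbols have been loaded.
 */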
void map__fixup_start(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols[map->type];
	struct rb_node *nd = rb_first(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->start = sym->start;
	}
}

void map__fixup_end(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols[map->type];
	struct rb_node *nd = rb_last(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"
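
/*
 * Lazily load the symbol table for the map's DSO, warning when no symbols
 * can be found.  Returns 0 on success, -1 otherwise.
 */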
int map__load(struct map *map, symbol_filter_t filter)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso, map->type))
		return 0;

	nr = dso__load(map->dso, map, filter);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[SBUILD_ID_SIZE];

			build_id__sprintf(map->dso->build_id,
					  sizeof(map->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated (is prelink enabled?). "
				"Restart the long running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}

int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}

struct symbol *map__find_symbol(struct map *map, u64 addr,
				symbol_filter_t filter)
{
	if (map__load(map, filter) < 0)
		return NULL;

	return dso__find_symbol(map->dso, map->type, addr);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
					symbol_filter_t filter)
{
	if (map__load(map, filter) < 0)
		return NULL;

	if (!dso__sorted_by_name(map->dso, map->type))
		dso__sort_by_name(map->dso, map->type);

	return dso__find_symbol_by_name(map->dso, map->type, name);
}
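
/*
 * Duplicate a map: the clone gets its own refcount and rb_node, shares the
 * underlying DSO (whose refcount is bumped) and starts out without a
 * map_groups owner.
 */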
struct map *map__clone(struct map *from)
{
	struct map *map = memdup(from, sizeof(*map));

	if (map != NULL) {
		atomic_set(&map->refcnt, 1);
		RB_CLEAR_NODE(&map->rb_node);
		dso__get(map->dso);
		map->groups = NULL;
	}

	return map;
}
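
/* Return 1 if the address ranges of the two maps overlap, 0 otherwise. */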
int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

size_t map__fprintf(struct map *map, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map->start, map->end, map->pgoff, map->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	const char *dsoname = "[unknown]";

	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else if (map->dso->name)
			dsoname = map->dso->name;
	}

	return fprintf(fp, "%s", dsoname);
}

int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
			 FILE *fp)
{
	char *srcline;
	int ret = 0;

	if (map && map->dso) {
		srcline = get_srcline(map->dso,
				      map__rip_2objdump(map, addr), NULL, true);
		if (srcline != SRCLINE_UNKNOWN)
			ret = fprintf(fp, "%s%s", prefix, srcline);
		free_srcline(srcline);
	}
	return ret;
}

/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	if (!map->dso->adjust_symbols)
		return rip;

	if (map->dso->rel)
		return rip - map->pgoff;

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return rip + map->dso->text_offset;

	return map->unmap_ip(map, rip) - map->reloc;
}

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	if (!map->dso->adjust_symbols)
		return map->unmap_ip(map, ip);

	if (map->dso->rel)
		return map->unmap_ip(map, ip + map->pgoff);

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return map->unmap_ip(map, ip - map->dso->text_offset);

	return ip + map->reloc;
}

static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	pthread_rwlock_init(&maps->lock, NULL);
}

void map_groups__init(struct map_groups *mg, struct machine *machine)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		maps__init(&mg->maps[i]);
	}
	mg->machine = machine;
	atomic_set(&mg->refcnt, 1);
}
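
/* Empty the rb-tree, dropping the tree's reference on each map. */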
static void __maps__purge(struct maps *maps)
{
	struct rb_root *root = &maps->entries;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_init(&pos->rb_node, root);
		map__put(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__purge(maps);
	pthread_rwlock_unlock(&maps->lock);
}

void map_groups__exit(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		maps__exit(&mg->maps[i]);
}

bool map_groups__empty(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		if (maps__first(&mg->maps[i]))
			return false;
	}

	return true;
}

struct map_groups *map_groups__new(struct machine *machine)
{
	struct map_groups *mg = malloc(sizeof(*mg));

	if (mg != NULL)
		map_groups__init(mg, machine);

	return mg;
}

void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	free(mg);
}

void map_groups__put(struct map_groups *mg)
{
	if (mg && atomic_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       enum map_type type, u64 addr,
				       struct map **mapp,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(mg, type, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map, filter) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr), filter);
	}

	return NULL;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
					 struct map **mapp, symbol_filter_t filter)
{
	struct symbol *sym;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		sym = map__find_symbol_by_name(pos, name, filter);

		if (sym == NULL)
			continue;
		if (mapp != NULL)
			*mapp = pos;
		goto out;
	}

	sym = NULL;
out:
	pthread_rwlock_unlock(&maps->lock);
	return sym;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       enum map_type type,
					       const char *name,
					       struct map **mapp,
					       symbol_filter_t filter)
{
	struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter);

	return sym;
}
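
/*
 * Resolve an addr_map_symbol: if @ams->addr falls outside its current map,
 * re-lookup the map in the owning map_groups, then compute the map-relative
 * address and find the matching symbol.  Returns 0 if a symbol was found.
 */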
int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
{
	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
		if (ams->map->groups == NULL)
			return -1;
		ams->map = map_groups__find(ams->map->groups, ams->map->type,
					    ams->addr);
		if (ams->map == NULL)
			return -1;
	}

	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
	ams->sym = map__find_symbol(ams->map, ams->al_addr, filter);

	return ams->sym ? 0 : -1;
}

static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, pos->type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	pthread_rwlock_unlock(&maps->lock);

	return printed;
}

size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
				  FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	return printed += maps__fprintf(&mg->maps[type], fp);
}

size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
{
	size_t printed = 0, i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(mg, i, fp);

	return printed;
}

static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
	__maps__insert(&mg->maps[map->type], map);
	map->groups = mg;
}
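
/*
 * Remove every existing map that overlaps the incoming @map, cloning the
 * non-overlapped leading/trailing parts back into the tree so that only
 * the region covered by @map is lost.  Overlaps are reported to @fp when
 * running in verbose mode.
 */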
static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next;
	int err = 0;

	pthread_rwlock_wrlock(&maps->lock);

	root = &maps->entries;
	next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map->start;
			__map_groups__insert(pos->groups, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map->end;
			__map_groups__insert(pos->groups, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos);

		if (err)
			goto out;
	}

	err = 0;
out:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}

int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct thread *thread,
		      struct map_groups *parent, enum map_type type)
{
	struct map_groups *mg = thread->mg;
	int err = -ENOMEM;
	struct map *map;
	struct maps *maps = &parent->maps[type];

	pthread_rwlock_rdlock(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *new = map__clone(map);
		if (new == NULL)
			goto out_unlock;

		err = unwind__prepare_access(thread, new, NULL);
		if (err)
			goto out_unlock;

		map_groups__insert(mg, new);
		map__put(new);
	}

	err = 0;
out_unlock:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}
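
/*
 * Insert @map into the rb-tree, which is ordered by start address, and take
 * a reference on it.  The __-prefixed variant expects maps->lock to be held
 * by the caller.
 */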
static void __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->entries.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, &maps->entries);
	map__get(map);
}

void maps__insert(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__insert(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}

static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);
}

void maps__remove(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__remove(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}
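
/* Search the rb-tree for the map whose [start, end) range contains @ip. */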
struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node **p, *parent = NULL;
	struct map *m;

	pthread_rwlock_rdlock(&maps->lock);

	p = &maps->entries.rb_node;
	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip >= m->end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	pthread_rwlock_unlock(&maps->lock);
	return m;
}

struct map *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(&maps->entries);

	if (first)
		return rb_entry(first, struct map, rb_node);
	return NULL;
}

struct map *map__next(struct map *map)
{
	struct rb_node *next = rb_next(&map->rb_node);

	if (next)
		return rb_entry(next, struct map, rb_node);
	return NULL;
}
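
/*
 * Kernel maps are allocated with a struct kmap trailing the struct map (see
 * map__new2()); these accessors return that trailer and the kernel
 * map_groups stored in it.
 */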
struct kmap *map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel) {
		pr_err("Internal error: map__kmap with a non-kernel map\n");
		return NULL;
	}
	return (struct kmap *)(map + 1);
}

struct map_groups *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}