map.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882
  1. #include "symbol.h"
  2. #include <errno.h>
  3. #include <inttypes.h>
  4. #include <limits.h>
  5. #include <stdlib.h>
  6. #include <string.h>
  7. #include <stdio.h>
  8. #include <unistd.h>
  9. #include "map.h"
  10. #include "thread.h"
  11. #include "strlist.h"
  12. #include "vdso.h"
  13. #include "build-id.h"
  14. #include "util.h"
  15. #include "debug.h"
  16. #include "machine.h"
  17. #include <linux/string.h>
  18. #include "unwind.h"
  19. static void __maps__insert(struct maps *maps, struct map *map);
/* Human-readable names for enum map_type, indexed by the enum value. */
const char *map_type__name[MAP__NR_TYPES] = {
    [MAP__FUNCTION] = "Functions",
    [MAP__VARIABLE] = "Variables",
};
  24. static inline int is_anon_memory(const char *filename)
  25. {
  26. return !strcmp(filename, "//anon") ||
  27. !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
  28. !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
  29. }
  30. static inline int is_no_dso_memory(const char *filename)
  31. {
  32. return !strncmp(filename, "[stack", 6) ||
  33. !strncmp(filename, "/SYSV",5) ||
  34. !strcmp(filename, "[heap]");
  35. }
  36. static inline int is_android_lib(const char *filename)
  37. {
  38. return !strncmp(filename, "/data/app-lib", 13) ||
  39. !strncmp(filename, "/system/lib", 11);
  40. }
  41. static inline bool replace_android_lib(const char *filename, char *newfilename)
  42. {
  43. const char *libname;
  44. char *app_abi;
  45. size_t app_abi_length, new_length;
  46. size_t lib_length = 0;
  47. libname = strrchr(filename, '/');
  48. if (libname)
  49. lib_length = strlen(libname);
  50. app_abi = getenv("APP_ABI");
  51. if (!app_abi)
  52. return false;
  53. app_abi_length = strlen(app_abi);
  54. if (!strncmp(filename, "/data/app-lib", 13)) {
  55. char *apk_path;
  56. if (!app_abi_length)
  57. return false;
  58. new_length = 7 + app_abi_length + lib_length;
  59. apk_path = getenv("APK_PATH");
  60. if (apk_path) {
  61. new_length += strlen(apk_path) + 1;
  62. if (new_length > PATH_MAX)
  63. return false;
  64. snprintf(newfilename, new_length,
  65. "%s/libs/%s/%s", apk_path, app_abi, libname);
  66. } else {
  67. if (new_length > PATH_MAX)
  68. return false;
  69. snprintf(newfilename, new_length,
  70. "libs/%s/%s", app_abi, libname);
  71. }
  72. return true;
  73. }
  74. if (!strncmp(filename, "/system/lib/", 11)) {
  75. char *ndk, *app;
  76. const char *arch;
  77. size_t ndk_length;
  78. size_t app_length;
  79. ndk = getenv("NDK_ROOT");
  80. app = getenv("APP_PLATFORM");
  81. if (!(ndk && app))
  82. return false;
  83. ndk_length = strlen(ndk);
  84. app_length = strlen(app);
  85. if (!(ndk_length && app_length && app_abi_length))
  86. return false;
  87. arch = !strncmp(app_abi, "arm", 3) ? "arm" :
  88. !strncmp(app_abi, "mips", 4) ? "mips" :
  89. !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
  90. if (!arch)
  91. return false;
  92. new_length = 27 + ndk_length +
  93. app_length + lib_length
  94. + strlen(arch);
  95. if (new_length > PATH_MAX)
  96. return false;
  97. snprintf(newfilename, new_length,
  98. "%s/platforms/%s/arch-%s/usr/lib/%s",
  99. ndk, app, arch, libname);
  100. return true;
  101. }
  102. return false;
  103. }
/*
 * Initialize an already-allocated map covering [start, end) at file offset
 * pgoff.  Takes a new reference on @dso; the caller holds the initial
 * reference on the map itself (refcnt = 1).
 */
void map__init(struct map *map, enum map_type type,
               u64 start, u64 end, u64 pgoff, struct dso *dso)
{
    map->type = type;
    map->start = start;
    map->end = end;
    map->pgoff = pgoff;
    map->reloc = 0;
    map->dso = dso__get(dso);          /* map owns a dso reference */
    map->map_ip = map__map_ip;         /* default dso-relative translation */
    map->unmap_ip = map__unmap_ip;
    RB_CLEAR_NODE(&map->rb_node);      /* not linked into any maps tree yet */
    map->groups = NULL;
    map->erange_warned = false;
    atomic_set(&map->refcnt, 1);
}
/*
 * Allocate and initialize a map for a single mmap record.  Classifies the
 * backing file (anonymous, vdso, Android library, no-DSO region), resolves
 * the matching DSO via the machine, and returns a map with refcnt 1, or
 * NULL on allocation/DSO lookup failure.
 */
struct map *map__new(struct machine *machine, u64 start, u64 len,
                     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
                     u64 ino_gen, u32 prot, u32 flags, char *filename,
                     enum map_type type, struct thread *thread)
{
    struct map *map = malloc(sizeof(*map));
    if (map != NULL) {
        char newfilename[PATH_MAX];
        struct dso *dso;
        int anon, no_dso, vdso, android;
        /* Classify the backing file before choosing a DSO. */
        android = is_android_lib(filename);
        anon = is_anon_memory(filename);
        vdso = is_vdso_map(filename);
        no_dso = is_no_dso_memory(filename);
        map->maj = d_maj;
        map->min = d_min;
        map->ino = ino;
        map->ino_generation = ino_gen;
        map->prot = prot;
        map->flags = flags;
        /* JIT-ed code: symbols come from the /tmp/perf-$PID.map convention. */
        if ((anon || no_dso) && type == MAP__FUNCTION) {
            snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
            filename = newfilename;
        }
        /* Rewrite on-device Android paths to their host-side location. */
        if (android) {
            if (replace_android_lib(filename, newfilename))
                filename = newfilename;
        }
        if (vdso) {
            /* The vdso is mapped whole, so the recorded pgoff is irrelevant. */
            pgoff = 0;
            dso = machine__findnew_vdso(machine, thread);
        } else
            dso = machine__findnew_dso(machine, filename);
        if (dso == NULL)
            goto out_delete;
        map__init(map, type, start, start + len, pgoff, dso);
        if (anon || no_dso) {
            /* No file offsets to translate: addresses map to themselves. */
            map->map_ip = map->unmap_ip = identity__map_ip;
            /*
             * Set memory without DSO as loaded. All map__find_*
             * functions still return NULL, and we avoid the
             * unnecessary map__load warning.
             */
            if (type != MAP__FUNCTION)
                dso__set_loaded(dso, map->type);
        }
        /* map__init took its own reference; drop the lookup reference. */
        dso__put(dso);
    }
    return map;
out_delete:
    free(map);
    return NULL;
}
  173. /*
  174. * Constructor variant for modules (where we know from /proc/modules where
  175. * they are loaded) and for vmlinux, where only after we load all the
  176. * symbols we'll know where it starts and ends.
  177. */
  178. struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
  179. {
  180. struct map *map = calloc(1, (sizeof(*map) +
  181. (dso->kernel ? sizeof(struct kmap) : 0)));
  182. if (map != NULL) {
  183. /*
  184. * ->end will be filled after we load all the symbols
  185. */
  186. map__init(map, type, start, 0, 0, dso);
  187. }
  188. return map;
  189. }
/*
 * Use this and __map__is_kmodule() for map instances that are in
 * machine->kmaps, and thus have map->groups->machine all properly set, to
 * disambiguate between the kernel and modules.
 *
 * When the need arises, introduce map__is_{kernel,kmodule}() that
 * checks (map->groups != NULL && map->groups->machine != NULL &&
 * map->dso->kernel) before calling __map__is_{kernel,kmodule}())
 */
bool __map__is_kernel(const struct map *map)
{
    /* The kernel proper is whichever map the machine registered as such. */
    return __machine__kernel_map(map->groups->machine, map->type) == map;
}
/* Release a map's resources; the map must already be off any rbtree. */
static void map__exit(struct map *map)
{
    BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
    dso__zput(map->dso);   /* drop the dso reference and NULL the field */
}
/* Tear down and free a map.  Callers normally go through map__put(). */
void map__delete(struct map *map)
{
    map__exit(map);
    free(map);
}
/* Drop one reference; the map is deleted when the last reference goes. */
void map__put(struct map *map)
{
    if (map && atomic_dec_and_test(&map->refcnt))
        map__delete(map);
}
  218. void map__fixup_start(struct map *map)
  219. {
  220. struct rb_root *symbols = &map->dso->symbols[map->type];
  221. struct rb_node *nd = rb_first(symbols);
  222. if (nd != NULL) {
  223. struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
  224. map->start = sym->start;
  225. }
  226. }
  227. void map__fixup_end(struct map *map)
  228. {
  229. struct rb_root *symbols = &map->dso->symbols[map->type];
  230. struct rb_node *nd = rb_last(symbols);
  231. if (nd != NULL) {
  232. struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
  233. map->end = sym->end;
  234. }
  235. }
  236. #define DSO__DELETED "(deleted)"
/*
 * Load the symbol table of @map's DSO if it is not loaded already.
 * Returns 0 on success, -1 on failure (a warning is printed), and -2 when
 * symbols existed but all were rejected by @filter.
 */
int map__load(struct map *map, symbol_filter_t filter)
{
    const char *name = map->dso->long_name;
    int nr;
    if (dso__loaded(map->dso, map->type))
        return 0;
    nr = dso__load(map->dso, map, filter);
    if (nr < 0) {
        if (map->dso->has_build_id) {
            char sbuild_id[SBUILD_ID_SIZE];
            build_id__sprintf(map->dso->build_id,
                              sizeof(map->dso->build_id),
                              sbuild_id);
            pr_warning("%s with build id %s not found",
                       name, sbuild_id);
        } else
            pr_warning("Failed to open %s", name);
        pr_warning(", continuing without symbols\n");
        return -1;
    } else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
        const size_t len = strlen(name);
        /*
         * NOTE(review): real_len wraps when len <= sizeof(DSO__DELETED),
         * but it is only read under the length guard below, so the wrapped
         * value is never used.
         */
        const size_t real_len = len - sizeof(DSO__DELETED);
        /* Detect "... (deleted)" names left behind by prelink rewrites. */
        if (len > sizeof(DSO__DELETED) &&
            strcmp(name + real_len + 1, DSO__DELETED) == 0) {
            pr_warning("%.*s was updated (is prelink enabled?). "
                       "Restart the long running apps that use it!\n",
                       (int)real_len, name);
        } else if (filter) {
            pr_warning("no symbols passed the given filter.\n");
            return -2; /* Empty but maybe by the filter */
        } else {
            pr_warning("no symbols found in %s, maybe install "
                       "a debug package?\n", name);
        }
#endif
        return -1;
    }
    return 0;
}
/*
 * Default symbol-name comparison; weak so architectures can override it
 * (e.g. to ignore decorations on symbol names).
 */
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
    return strcmp(namea, nameb);
}
  281. struct symbol *map__find_symbol(struct map *map, u64 addr,
  282. symbol_filter_t filter)
  283. {
  284. if (map__load(map, filter) < 0)
  285. return NULL;
  286. return dso__find_symbol(map->dso, map->type, addr);
  287. }
  288. struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
  289. symbol_filter_t filter)
  290. {
  291. if (map__load(map, filter) < 0)
  292. return NULL;
  293. if (!dso__sorted_by_name(map->dso, map->type))
  294. dso__sort_by_name(map->dso, map->type);
  295. return dso__find_symbol_by_name(map->dso, map->type, name);
  296. }
  297. struct map *map__clone(struct map *from)
  298. {
  299. struct map *map = memdup(from, sizeof(*map));
  300. if (map != NULL) {
  301. atomic_set(&map->refcnt, 1);
  302. RB_CLEAR_NODE(&map->rb_node);
  303. dso__get(map->dso);
  304. map->groups = NULL;
  305. }
  306. return map;
  307. }
  308. int map__overlap(struct map *l, struct map *r)
  309. {
  310. if (l->start > r->start) {
  311. struct map *t = l;
  312. l = r;
  313. r = t;
  314. }
  315. if (l->end > r->start)
  316. return 1;
  317. return 0;
  318. }
/* Print one map line: " start-end pgoff dso-name\n". */
size_t map__fprintf(struct map *map, FILE *fp)
{
    return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
                   map->start, map->end, map->pgoff, map->dso->name);
}
  324. size_t map__fprintf_dsoname(struct map *map, FILE *fp)
  325. {
  326. const char *dsoname = "[unknown]";
  327. if (map && map->dso && (map->dso->name || map->dso->long_name)) {
  328. if (symbol_conf.show_kernel_path && map->dso->long_name)
  329. dsoname = map->dso->long_name;
  330. else if (map->dso->name)
  331. dsoname = map->dso->name;
  332. }
  333. return fprintf(fp, "%s", dsoname);
  334. }
  335. int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
  336. FILE *fp)
  337. {
  338. char *srcline;
  339. int ret = 0;
  340. if (map && map->dso) {
  341. srcline = get_srcline(map->dso,
  342. map__rip_2objdump(map, addr), NULL, true);
  343. if (srcline != SRCLINE_UNKNOWN)
  344. ret = fprintf(fp, "%s%s", prefix, srcline);
  345. free_srcline(srcline);
  346. }
  347. return ret;
  348. }
/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
    /* Symbols were not adjusted at load time: rip is already objdump-ready. */
    if (!map->dso->adjust_symbols)
        return rip;
    /* ET_REL (e.g. kernel modules): section-relative, undo the file offset. */
    if (map->dso->rel)
        return rip - map->pgoff;
    /*
     * kernel modules also have DSO_TYPE_USER in dso->kernel,
     * but all kernel modules are ET_REL, so won't get here.
     */
    if (map->dso->kernel == DSO_TYPE_USER)
        return rip + map->dso->text_offset;
    /* ET_EXEC-like: absolute address, minus any runtime relocation. */
    return map->unmap_ip(map, rip) - map->reloc;
}
/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
    /* Exact inverse of each branch in map__rip_2objdump(). */
    if (!map->dso->adjust_symbols)
        return map->unmap_ip(map, ip);
    if (map->dso->rel)
        return map->unmap_ip(map, ip + map->pgoff);
    /*
     * kernel modules also have DSO_TYPE_USER in dso->kernel,
     * but all kernel modules are ET_REL, so won't get here.
     */
    if (map->dso->kernel == DSO_TYPE_USER)
        return map->unmap_ip(map, ip - map->dso->text_offset);
    return ip + map->reloc;
}
/* Prepare an empty, rwlock-protected rbtree of maps. */
static void maps__init(struct maps *maps)
{
    maps->entries = RB_ROOT;
    pthread_rwlock_init(&maps->lock, NULL);
}
  405. void map_groups__init(struct map_groups *mg, struct machine *machine)
  406. {
  407. int i;
  408. for (i = 0; i < MAP__NR_TYPES; ++i) {
  409. maps__init(&mg->maps[i]);
  410. }
  411. mg->machine = machine;
  412. atomic_set(&mg->refcnt, 1);
  413. }
  414. static void __maps__purge(struct maps *maps)
  415. {
  416. struct rb_root *root = &maps->entries;
  417. struct rb_node *next = rb_first(root);
  418. while (next) {
  419. struct map *pos = rb_entry(next, struct map, rb_node);
  420. next = rb_next(&pos->rb_node);
  421. rb_erase_init(&pos->rb_node, root);
  422. map__put(pos);
  423. }
  424. }
/* Empty the maps tree under the write lock. */
static void maps__exit(struct maps *maps)
{
    pthread_rwlock_wrlock(&maps->lock);
    __maps__purge(maps);
    pthread_rwlock_unlock(&maps->lock);
}
  431. void map_groups__exit(struct map_groups *mg)
  432. {
  433. int i;
  434. for (i = 0; i < MAP__NR_TYPES; ++i)
  435. maps__exit(&mg->maps[i]);
  436. }
  437. bool map_groups__empty(struct map_groups *mg)
  438. {
  439. int i;
  440. for (i = 0; i < MAP__NR_TYPES; ++i) {
  441. if (maps__first(&mg->maps[i]))
  442. return false;
  443. }
  444. return true;
  445. }
  446. struct map_groups *map_groups__new(struct machine *machine)
  447. {
  448. struct map_groups *mg = malloc(sizeof(*mg));
  449. if (mg != NULL)
  450. map_groups__init(mg, machine);
  451. return mg;
  452. }
/* Tear down and free a map_groups.  Callers normally use map_groups__put(). */
void map_groups__delete(struct map_groups *mg)
{
    map_groups__exit(mg);
    free(mg);
}
/* Drop one reference; deletes the map_groups when the last one goes. */
void map_groups__put(struct map_groups *mg)
{
    if (mg && atomic_dec_and_test(&mg->refcnt))
        map_groups__delete(mg);
}
/*
 * Find the symbol covering @addr among the maps of the given type.
 * On success *mapp (if non-NULL) is set to the containing map.
 */
struct symbol *map_groups__find_symbol(struct map_groups *mg,
                                       enum map_type type, u64 addr,
                                       struct map **mapp,
                                       symbol_filter_t filter)
{
    struct map *map = map_groups__find(mg, type, addr);
    /* Ensure map is loaded before using map->map_ip */
    if (map != NULL && map__load(map, filter) >= 0) {
        if (mapp != NULL)
            *mapp = map;
        return map__find_symbol(map, map->map_ip(map, addr), filter);
    }
    return NULL;
}
  477. struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
  478. struct map **mapp, symbol_filter_t filter)
  479. {
  480. struct symbol *sym;
  481. struct rb_node *nd;
  482. pthread_rwlock_rdlock(&maps->lock);
  483. for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
  484. struct map *pos = rb_entry(nd, struct map, rb_node);
  485. sym = map__find_symbol_by_name(pos, name, filter);
  486. if (sym == NULL)
  487. continue;
  488. if (mapp != NULL)
  489. *mapp = pos;
  490. goto out;
  491. }
  492. sym = NULL;
  493. out:
  494. pthread_rwlock_unlock(&maps->lock);
  495. return sym;
  496. }
  497. struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
  498. enum map_type type,
  499. const char *name,
  500. struct map **mapp,
  501. symbol_filter_t filter)
  502. {
  503. struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter);
  504. return sym;
  505. }
/*
 * Resolve @ams->addr to a map-relative address and symbol, re-doing the
 * map lookup when the cached map no longer covers the address.
 * Returns 0 when a symbol was found, -1 otherwise.
 */
int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
{
    if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
        if (ams->map->groups == NULL)
            return -1;
        ams->map = map_groups__find(ams->map->groups, ams->map->type,
                                    ams->addr);
        if (ams->map == NULL)
            return -1;
    }
    ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
    ams->sym = map__find_symbol(ams->map, ams->al_addr, filter);
    return ams->sym ? 0 : -1;
}
  520. static size_t maps__fprintf(struct maps *maps, FILE *fp)
  521. {
  522. size_t printed = 0;
  523. struct rb_node *nd;
  524. pthread_rwlock_rdlock(&maps->lock);
  525. for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
  526. struct map *pos = rb_entry(nd, struct map, rb_node);
  527. printed += fprintf(fp, "Map:");
  528. printed += map__fprintf(pos, fp);
  529. if (verbose > 2) {
  530. printed += dso__fprintf(pos->dso, pos->type, fp);
  531. printed += fprintf(fp, "--\n");
  532. }
  533. }
  534. pthread_rwlock_unlock(&maps->lock);
  535. return printed;
  536. }
  537. size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
  538. FILE *fp)
  539. {
  540. size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
  541. return printed += maps__fprintf(&mg->maps[type], fp);
  542. }
  543. size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
  544. {
  545. size_t printed = 0, i;
  546. for (i = 0; i < MAP__NR_TYPES; ++i)
  547. printed += __map_groups__fprintf_maps(mg, i, fp);
  548. return printed;
  549. }
/* Insert @map into the tree matching its type and record its owner group. */
static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
    __maps__insert(&mg->maps[map->type], map);
    map->groups = mg;
}
/*
 * Remove every existing map that overlaps the incoming @map, cloning the
 * non-overlapped head and/or tail of each victim back into the tree.
 * Returns 0 on success, -ENOMEM if a clone failed.
 */
static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
    struct rb_root *root;
    struct rb_node *next;
    int err = 0;
    pthread_rwlock_wrlock(&maps->lock);
    root = &maps->entries;
    next = rb_first(root);
    while (next) {
        struct map *pos = rb_entry(next, struct map, rb_node);
        /* Advance before pos is possibly erased from the tree. */
        next = rb_next(&pos->rb_node);
        if (!map__overlap(pos, map))
            continue;
        if (verbose >= 2) {
            fputs("overlapping maps:\n", fp);
            map__fprintf(map, fp);
            map__fprintf(pos, fp);
        }
        rb_erase_init(&pos->rb_node, root);
        /*
         * Now check if we need to create new maps for areas not
         * overlapped by the new map:
         */
        if (map->start > pos->start) {
            /* Keep the part of pos that lies before the new map. */
            struct map *before = map__clone(pos);
            if (before == NULL) {
                err = -ENOMEM;
                goto put_map;
            }
            before->end = map->start;
            __map_groups__insert(pos->groups, before);
            if (verbose >= 2)
                map__fprintf(before, fp);
            map__put(before);
        }
        if (map->end < pos->end) {
            /* Keep the part of pos that lies after the new map. */
            struct map *after = map__clone(pos);
            if (after == NULL) {
                err = -ENOMEM;
                goto put_map;
            }
            after->start = map->end;
            __map_groups__insert(pos->groups, after);
            if (verbose >= 2)
                map__fprintf(after, fp);
            map__put(after);
        }
put_map:
        /* Drop the reference the tree held on the erased map. */
        map__put(pos);
        if (err)
            goto out;
    }
    err = 0;
out:
    pthread_rwlock_unlock(&maps->lock);
    return err;
}
/* Fix up overlaps within the maps tree matching @map's type. */
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
                                   FILE *fp)
{
    return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
}
/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct thread *thread,
                      struct map_groups *parent, enum map_type type)
{
    struct map_groups *mg = thread->mg;
    int err = -ENOMEM;
    struct map *map;
    struct maps *maps = &parent->maps[type];
    pthread_rwlock_rdlock(&maps->lock);
    for (map = maps__first(maps); map; map = map__next(map)) {
        struct map *new = map__clone(map);
        if (new == NULL)
            goto out_unlock;
        /* Let the unwinder set up its per-map state for the new thread. */
        err = unwind__prepare_access(thread, new, NULL);
        if (err)
            goto out_unlock;
        /* The tree takes its own reference; drop ours. */
        map_groups__insert(mg, new);
        map__put(new);
    }
    err = 0;
out_unlock:
    pthread_rwlock_unlock(&maps->lock);
    return err;
}
  643. static void __maps__insert(struct maps *maps, struct map *map)
  644. {
  645. struct rb_node **p = &maps->entries.rb_node;
  646. struct rb_node *parent = NULL;
  647. const u64 ip = map->start;
  648. struct map *m;
  649. while (*p != NULL) {
  650. parent = *p;
  651. m = rb_entry(parent, struct map, rb_node);
  652. if (ip < m->start)
  653. p = &(*p)->rb_left;
  654. else
  655. p = &(*p)->rb_right;
  656. }
  657. rb_link_node(&map->rb_node, parent, p);
  658. rb_insert_color(&map->rb_node, &maps->entries);
  659. map__get(map);
  660. }
/* Insert under the write lock. */
void maps__insert(struct maps *maps, struct map *map)
{
    pthread_rwlock_wrlock(&maps->lock);
    __maps__insert(maps, map);
    pthread_rwlock_unlock(&maps->lock);
}
/* Unlink @map from the tree and drop the tree's reference on it. */
static void __maps__remove(struct maps *maps, struct map *map)
{
    rb_erase_init(&map->rb_node, &maps->entries);
    map__put(map);
}
/* Remove under the write lock. */
void maps__remove(struct maps *maps, struct map *map)
{
    pthread_rwlock_wrlock(&maps->lock);
    __maps__remove(maps, map);
    pthread_rwlock_unlock(&maps->lock);
}
  678. struct map *maps__find(struct maps *maps, u64 ip)
  679. {
  680. struct rb_node **p, *parent = NULL;
  681. struct map *m;
  682. pthread_rwlock_rdlock(&maps->lock);
  683. p = &maps->entries.rb_node;
  684. while (*p != NULL) {
  685. parent = *p;
  686. m = rb_entry(parent, struct map, rb_node);
  687. if (ip < m->start)
  688. p = &(*p)->rb_left;
  689. else if (ip >= m->end)
  690. p = &(*p)->rb_right;
  691. else
  692. goto out;
  693. }
  694. m = NULL;
  695. out:
  696. pthread_rwlock_unlock(&maps->lock);
  697. return m;
  698. }
  699. struct map *maps__first(struct maps *maps)
  700. {
  701. struct rb_node *first = rb_first(&maps->entries);
  702. if (first)
  703. return rb_entry(first, struct map, rb_node);
  704. return NULL;
  705. }
  706. struct map *map__next(struct map *map)
  707. {
  708. struct rb_node *next = rb_next(&map->rb_node);
  709. if (next)
  710. return rb_entry(next, struct map, rb_node);
  711. return NULL;
  712. }
  713. struct kmap *map__kmap(struct map *map)
  714. {
  715. if (!map->dso || !map->dso->kernel) {
  716. pr_err("Internal error: map__kmap with a non-kernel map\n");
  717. return NULL;
  718. }
  719. return (struct kmap *)(map + 1);
  720. }
  721. struct map_groups *map__kmaps(struct map *map)
  722. {
  723. struct kmap *kmap = map__kmap(map);
  724. if (!kmap || !kmap->kmaps) {
  725. pr_err("Internal error: map__kmaps with a non-kernel map\n");
  726. return NULL;
  727. }
  728. return kmap->kmaps;
  729. }