/* dso.c */

#include <asm/bug.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "symbol.h"
#include "dso.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h"
#include "debug.h"

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

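/*
 * Build the on-disk file name to load symbols from for @dso, according
 * to the binary @type being tried (debuglink, build-id cache, distro
 * debuginfo, kernel module path, ...). Where applicable the result is
 * prefixed with the configured symfs and/or the guest @root_dir.
 * Returns 0 on success, -1 if no file name can be built for @type.
 */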
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[BUILD_ID_SIZE * 2 + 1];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK: {
		char *debuglink;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		debuglink = filename + len;
		while (debuglink != filename && *debuglink != '/')
			debuglink--;
		if (*debuglink == '/')
			debuglink++;

		ret = -1;
		if (!is_regular_file(filename))
			break;

		ret = filename__read_debuglink(filename, debuglink,
					       size - (debuglink - filename));
		}
		break;
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		/* skip the locally configured cache if a symfs is given */
		if (symbol_conf.symfs[0] ||
		    (dso__build_id_filename(dso, filename, size) == NULL))
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}

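/*
 * Decompressors for compressed kernel module files, selected by file
 * name suffix; entries are compiled in only when the corresponding
 * library support (zlib, lzma) is available.
 */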
static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};

bool is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return true;
	}
	return false;
}

bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

bool decompress_to_file(const char *ext, const char *filename, int output_fd)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return !compressions[i].decompress(filename,
							   output_fd);
	}
	return false;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}

/*
 * Parses kernel module specified in @path and updates
 * @m argument like:
 *
 *    @comp - true if @path contains supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains '.ko' suffix in right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *            of the kernel module without suffixes, otherwise strdup-ed
 *            base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains strdup-ed string
 *            of the compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * name should belong to kernel itself, not kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;
		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	if (is_supported_compression(ext + 1)) {
		m->comp = true;
		ext -= 3;
	}

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strxfrchar(m->name, '-', '_');
	}

	if (alloc_ext && m->comp) {
		m->ext = strdup(ext + 4);
		if (!m->ext) {
			free((void *) m->name);
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

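/*
 * Open @name read-only. If the process has run out of file descriptors
 * (EMFILE) and there are still DSO data files open, close the oldest
 * one and retry until the open succeeds or nothing is left to close.
 */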
static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX)) {
		free(name);
		return -EINVAL;
	}

	fd = do_open(name);
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Open @dso's data file descriptor and update
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd = __open_dso(dso, machine);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and update
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static bool may_cache_fd(void)
{
	static rlim_t limit;

	if (!limit)
		limit = get_fd_limit();

	if (limit == RLIM_INFINITY)
		return true;

	return limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close LRU dso if we crossed allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find dso's file, open it and
 * return its file descriptor. It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}

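/*
 * Typical pairing of the fd interface above (a minimal sketch, error
 * handling elided):
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *	if (fd >= 0) {
 *		... read from fd ...
 *		dso__data_put_fd(dso);
 *	}
 *
 * dso__data_get_fd() keeps dso__data_open_lock held on success, so the
 * window between the two calls should be kept short.
 */
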
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

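/*
 * DSO file contents are cached in DSO__DATA_CACHE_SIZE chunks, kept in
 * the dso->data.cache rb-tree keyed by file offset. The helpers below
 * free, look up and insert cache entries.
 */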
static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}

static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t
dso_cache__memcpy(struct dso_cache *cache, u64 offset,
		  u8 *data, u64 size)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size = min(cache->size - cache_offset, size);

	memcpy(data, cache->data + cache_offset, cache_size);
	return cache_size;
}

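/*
 * Read a new DSO__DATA_CACHE_SIZE chunk from the file, insert it into
 * the cache tree and copy the requested bytes out of it. If another
 * thread inserted an overlapping chunk first, the freshly read one is
 * dropped and the existing entry is used instead.
 */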
static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if other thread opened another
		 * file (dso) due to open file limit (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size   = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lose the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	if (ret <= 0)
		free(cache);

	return ret;
}

static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;

	cache = dso_cache__find(dso, offset);
	if (cache)
		return dso_cache__memcpy(cache, offset, data, size);
	else
		return dso_cache__read(dso, machine, offset, data, size);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
 * in the rb_tree. Any read to already cached data is served
 * by cached data.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r      += ret;
		p      += ret;
		offset += ret;
		size   -= ret;
	} while (size);

	return r;
}

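/*
 * Determine the file size via fstat() once and cache it in
 * dso->data.file_size; later calls return the cached value.
 */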
static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       strerror_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}

static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
				u64 offset, u8 *data, ssize_t size)
{
	if (data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_read(dso, machine, offset, data, size);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso file offset. Open
 * dso data file and use cached_read to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map used to translate @addr to a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);

	return dso__data_read_offset(dso, machine, offset, data, size);
}

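/*
 * Usage sketch for the read interface above (illustrative only; 'map'
 * stands for whichever struct map the address was resolved through):
 *
 *	u8 buf[64];
 *	ssize_t n = dso__data_read_addr(dso, map, machine, addr,
 *					buf, sizeof(buf));
 *	if (n > 0)
 *		... the first n bytes of buf are valid ...
 */
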
struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso, MAP__FUNCTION);

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

/*
 * Find a matching entry and/or link current entry to RB tree.
 * Either the dso or the name parameter must be non-NULL or the
 * function will not work.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print a one-time warning & put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this;	/* Found matching dso */
			/*
			 * The core kernel DSOs may have duplicated long name.
			 * In this case, the short name should be different.
			 * Compare the short names to differentiate the DSOs.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
		dso->root = root;
	}
	return NULL;
}

static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}

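/*
 * Changing a dso's long name invalidates its position in the
 * by-longname rb-tree, so it is unlinked first and re-linked under the
 * new name afterwards.
 */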
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dso__findlink_by_longname() isn't guaranteed to add it
		 * back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dso__findlink_by_longname(root, dso, NULL);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name = name;
	dso->short_name_len = strlen(name);
	dso->short_name_allocated = name_allocated;
}

static void dso__set_basename(struct dso *dso)
{
	/*
	 * basename() may modify path buffer, so we must pass
	 * a copy.
	 */
	char *base, *lname = strdup(dso->long_name);

	if (!lname)
		return;

	/*
	 * basename() may return a pointer to internal
	 * storage which is reused in subsequent calls
	 * so copy the result.
	 */
	base = strdup(basename(lname));

	free(lname);

	if (!base)
		return;

	dso__set_short_name(dso, base, true);
}

int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso, enum map_type type)
{
	return dso->loaded & (1 << type);
}

bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
{
	return dso->sorted_by_name & (1 << type);
}

void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
{
	dso->sorted_by_name |= (1 << type);
}

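/*
 * The name is stored in a flexible array member at the end of struct
 * dso, so a single allocation here covers both the object and its name.
 */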
struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;

		strcpy(dso->name, name);
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		atomic_set(&dso->refcnt, 1);
	}

	return dso;
}

void dso__delete(struct dso *dso)
{
	int i;

	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
	if (dso)
		atomic_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && atomic_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}

void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}

bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
	bool have_build_id = false;
	struct dso *pos;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit)
			continue;
		if (pos->has_build_id) {
			have_build_id = true;
			continue;
		}
		if (filename__read_build_id(pos->long_name, pos->build_id,
					    sizeof(pos->build_id)) > 0) {
			have_build_id = true;
			pos->has_build_id = true;
		}
	}

	return have_build_id;
}

void __dsos__add(struct dsos *dsos, struct dso *dso)
{
	list_add_tail(&dso->node, &dsos->head);
	__dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, grab a reference, then garbage collect
	 * this when needing memory, by looking at LRU dso instances in the
	 * list with atomic_read(&dso->refcnt) == 1, i.e. no references
	 * anywhere besides the one for the list, do, under a lock for the
	 * list: remove it from the list, then a dso__put(), that probably will
	 * be the last and will then call dso__delete(), end of life.
	 *
	 * That, or at the end of the 'struct machine' lifetime, when all
	 * 'struct dso' instances will be removed from the list, in
	 * dsos__exit(), if they have no other reference from some other data
	 * structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map', 'dso' structs all from 'struct
	 * hist_entry' instances, but we may not need anything not referenced,
	 * so we might as well call machines__exit()/machines__delete() and
	 * garbage collect it.
	 */
	dso__get(dso);
}

void dsos__add(struct dsos *dsos, struct dso *dso)
{
	pthread_rwlock_wrlock(&dsos->lock);
	__dsos__add(dsos, dso);
	pthread_rwlock_unlock(&dsos->lock);
}

struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *pos;

	if (cmp_short) {
		list_for_each_entry(pos, &dsos->head, node)
			if (strcmp(pos->short_name, name) == 0)
				return pos;
		return NULL;
	}
	return __dso__find_by_longname(&dsos->root, name);
}

struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *dso;

	pthread_rwlock_rdlock(&dsos->lock);
	dso = __dsos__find(dsos, name, cmp_short);
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}

struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = dso__new(name);

	if (dso != NULL) {
		__dsos__add(dsos, dso);
		dso__set_basename(dso);
		/* Put dso here because __dsos__add already got it */
		dso__put(dso);
	}
	return dso;
}

struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = __dsos__find(dsos, name, false);

	return dso ? dso : __dsos__addnew(dsos, name);
}

struct dso *dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&dsos->lock);
	dso = dso__get(__dsos__findnew(dsos, name));
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}

size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
			       bool (skip)(struct dso *dso, int parm), int parm)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		if (skip && skip(pos, parm))
			continue;
		ret += dso__fprintf_buildid(pos, fp);
		ret += fprintf(fp, " %s\n", pos->long_name);
	}
	return ret;
}

size_t __dsos__fprintf(struct list_head *head, FILE *fp)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		int i;
		for (i = 0; i < MAP__NR_TYPES; ++i)
			ret += dso__fprintf(pos, i, fp);
	}

	return ret;
}

size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
		       dso__loaded(dso, type) ? "" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}

enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	fd = dso__data_get_fd(dso, machine);
	if (fd >= 0) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}

int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as the enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = strerror_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;

	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}