dso.c
#include <asm/bug.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "symbol.h"
#include "dso.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h"
#include "debug.h"
#include "vdso.h"

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS] = 'k',
		[DSO_BINARY_TYPE__VMLINUX] = 'v',
		[DSO_BINARY_TYPE__JAVA_JIT] = 'j',
		[DSO_BINARY_TYPE__DEBUGLINK] = 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}
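/*
 * Build the path under which the given binary @type of @dso is expected
 * to be found, writing at most @size bytes into @filename. @root_dir is
 * only used for guest kernel modules. Returns 0 on success and a negative
 * value when the type has no backing file or the name cannot be built.
 */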
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK: {
		char *debuglink;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		debuglink = filename + len;
		while (debuglink != filename && *debuglink != '/')
			debuglink--;
		if (*debuglink == '/')
			debuglink++;

		ret = -1;
		if (!is_regular_file(filename))
			break;

		ret = filename__read_debuglink(filename, debuglink,
					       size - (debuglink - filename));
		}
		break;
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");

		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}
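/*
 * Decompressors for compressed kernel modules, keyed by file name
 * extension. The table is terminated by a NULL entry.
 */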
static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};

bool is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return true;
	}
	return false;
}
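/*
 * Decide whether @pathname names a kernel module. @cpumode is expected to
 * be already masked with PERF_RECORD_MISC_CPUMODE_MASK; user, hypervisor
 * and guest-user samples are never modules, everything else is decided by
 * kmod_path__parse().
 */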
bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

bool decompress_to_file(const char *ext, const char *filename, int output_fd)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return !compressions[i].decompress(filename,
							   output_fd);
	}
	return false;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}
/*
 * Parses the kernel module specified in @path and updates
 * the @m argument as follows:
 *
 *    @comp - true if @path contains a supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains the '.ko' suffix in the right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed
 *            base name of the kernel module without suffixes, otherwise
 *            the strdup-ed base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains the strdup-ed
 *            compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
{
	const char *name = strrchr(path, '/');
	const char *ext = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for a module name. For example,
	 * [aaa.bbb] is a valid module name, so '[' should have higher
	 * priority than the '.ko' suffix.
	 *
	 * The kernel names come from machine__mmap_name. Such names
	 * belong to the kernel itself, not to a kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;
		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	if (is_supported_compression(ext + 1)) {
		m->comp = true;
		ext -= 3;
	}

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strxfrchar(m->name, '-', '_');
	}

	if (alloc_ext && m->comp) {
		m->ext = strdup(ext + 4);
		if (!m->ext) {
			free((void *) m->name);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);
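/*
 * Open @name read-only. If we run out of file descriptors (EMFILE) and
 * there are still cached DSO fds, close the least recently opened one
 * and retry.
 */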
static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}
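/*
 * Resolve the file name backing @dso's binary_type (relative to @machine's
 * root directory, if any) and open it. Returns an open fd on success or a
 * negative value on failure.
 */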
static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX)) {
		free(name);
		return -EINVAL;
	}

	fd = do_open(name);
	free(name);
	return fd;
}
static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 * @machine: machine object
 *
 * Open @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd = __open_dso(dso, machine);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}
static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}
static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}
static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should need to change this
 * during a standard run.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}

static bool may_cache_fd(void)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close the LRU dso if we crossed the allowed limit
 * of open dso file descriptors. The limit is half of the
 * current RLIMIT_NOFILE soft limit.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}
/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}
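/*
 * Open @dso's data file if it is not open already. If no binary type has
 * been determined yet, probe the build-id cache first and then the system
 * path, and record the result in dso->data.status.
 */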
static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}
/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find the dso's file, open it and
 * return its file descriptor. It must be paired with
 * dso__data_put_fd() whenever it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}
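/*
 * Look up the cached chunk that covers @offset, or return NULL if that
 * part of the file has not been read (and cached) yet.
 */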
static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}
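/*
 * Insert @new into the cache tree under dso->lock. If another thread
 * already cached a chunk covering the same offset, return that existing
 * chunk (leaving @new unlinked); return NULL on a successful insert.
 */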
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}
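/*
 * Copy as much of the request as @cache covers, starting at @offset,
 * into @data and return the number of bytes copied.
 */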
static ssize_t
dso_cache__memcpy(struct dso_cache *cache, u64 offset,
		  u8 *data, u64 size)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size = min(cache->size - cache_offset, size);

	memcpy(data, cache->data + cache_offset, cache_size);
	return cache_size;
}
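/*
 * Read one DSO__DATA_CACHE_SIZE-aligned chunk containing @offset from the
 * dso file, try to insert it into the cache tree (falling back to a chunk
 * a racing thread inserted first) and copy the requested bytes out of it.
 */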
static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if another thread opened
		 * another file (dso) due to the open file limit
		 * (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lost the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	if (ret <= 0)
		free(cache);

	return ret;
}

static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;

	cache = dso_cache__find(dso, offset);
	if (cache)
		return dso_cache__memcpy(cache, offset, data, size);
	else
		return dso_cache__read(dso, machine, offset, data, size);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks kept
 * in the rb_tree. Any read of already cached data is served from the
 * cached chunks.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r += ret;
		p += ret;
		offset += ret;
		size -= ret;

	} while (size);

	return r;
}
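/*
 * Lazily fill dso->data.file_size by fstat()ing the dso's data file,
 * opening it first if necessary. Returns 0 on success and a negative
 * value on failure, marking the dso data status as ERROR.
 */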
static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (data_file_size(dso, machine))
		return -1;

	/* For now just assume the dso data size is close to the file size */
	return dso->data.file_size;
}

static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
				u64 offset, u8 *data, ssize_t size)
{
	if (data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_read(dso, machine, offset, data, size);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens
 * the dso data file and uses cached_read to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: memory map
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);

	return dso__data_read_offset(dso, machine, offset, data, size);
}
struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso, MAP__FUNCTION);

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}
/*
 * Find a matching entry and/or link the current entry to the RB tree.
 * Either the dso or the name parameter must be non-NULL or the
 * function will not work.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print a one-time warning & put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this; /* Found matching dso */
			/*
			 * The core kernel DSOs may have duplicated long names.
			 * In this case, the short names should be different.
			 * Compare the short names to differentiate the DSOs.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
		dso->root = root;
	}
	return NULL;
}
static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dso__findlink_by_longname() isn't guaranteed to add it
		 * back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dso__findlink_by_longname(root, dso, NULL);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name = name;
	dso->short_name_len = strlen(name);
	dso->short_name_allocated = name_allocated;
}
static void dso__set_basename(struct dso *dso)
{
	/*
	 * basename() may modify the path buffer, so we must pass
	 * a copy.
	 */
	char *base, *lname = strdup(dso->long_name);

	if (!lname)
		return;

	/*
	 * basename() may return a pointer to internal
	 * storage which is reused in subsequent calls,
	 * so copy the result.
	 */
	base = strdup(basename(lname));

	free(lname);

	if (!base)
		return;

	dso__set_short_name(dso, base, true);
}
int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso, enum map_type type)
{
	return dso->loaded & (1 << type);
}

bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
{
	return dso->sorted_by_name & (1 << type);
}

void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
{
	dso->sorted_by_name |= (1 << type);
}
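/*
 * Allocate a new dso with @name stored inline after the struct and all
 * fields set to their defaults. The caller owns the initial reference.
 */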
struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;

		strcpy(dso->name, name);
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		atomic_set(&dso->refcnt, 1);
	}

	return dso;
}
void dso__delete(struct dso *dso)
{
	int i;

	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
	if (dso)
		atomic_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && atomic_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}

void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}

bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
	bool have_build_id = false;
	struct dso *pos;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit && !dso__is_vdso(pos))
			continue;
		if (pos->has_build_id) {
			have_build_id = true;
			continue;
		}
		if (filename__read_build_id(pos->long_name, pos->build_id,
					    sizeof(pos->build_id)) > 0) {
			have_build_id = true;
			pos->has_build_id = true;
		}
	}

	return have_build_id;
}
void __dsos__add(struct dsos *dsos, struct dso *dso)
{
	list_add_tail(&dso->node, &dsos->head);
	__dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, so grab a reference. When memory is
	 * needed it can be garbage collected by looking at LRU dso instances
	 * in the list with atomic_read(&dso->refcnt) == 1, i.e. with no
	 * references anywhere besides the one for the list: under a lock for
	 * the list, remove it from the list, then do a dso__put(), which
	 * probably will be the last one and will then call dso__delete(),
	 * end of life.
	 *
	 * That, or at the end of the 'struct machine' lifetime, when all
	 * 'struct dso' instances will be removed from the list, in
	 * dsos__exit(), if they have no other reference from some other data
	 * structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map' and 'dso' structs all from
	 * 'struct hist_entry' instances, but we may not need anything not
	 * referenced, so we might as well call machines__exit()/
	 * machines__delete() and garbage collect it.
	 */
	dso__get(dso);
}

void dsos__add(struct dsos *dsos, struct dso *dso)
{
	pthread_rwlock_wrlock(&dsos->lock);
	__dsos__add(dsos, dso);
	pthread_rwlock_unlock(&dsos->lock);
}
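/*
 * Look a dso up by name: short-name lookups walk the list linearly,
 * long-name lookups use the rb-tree indexed by long name.
 */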
struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *pos;

	if (cmp_short) {
		list_for_each_entry(pos, &dsos->head, node)
			if (strcmp(pos->short_name, name) == 0)
				return pos;
		return NULL;
	}
	return __dso__find_by_longname(&dsos->root, name);
}

struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *dso;

	pthread_rwlock_rdlock(&dsos->lock);
	dso = __dsos__find(dsos, name, cmp_short);
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}
struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = dso__new(name);

	if (dso != NULL) {
		__dsos__add(dsos, dso);
		dso__set_basename(dso);
		/* Put dso here because __dsos__add already got it */
		dso__put(dso);
	}
	return dso;
}

struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = __dsos__find(dsos, name, false);

	return dso ? dso : __dsos__addnew(dsos, name);
}

struct dso *dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&dsos->lock);
	dso = dso__get(__dsos__findnew(dsos, name));
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}
size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
			       bool (skip)(struct dso *dso, int parm), int parm)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		if (skip && skip(pos, parm))
			continue;
		ret += dso__fprintf_buildid(pos, fp);
		ret += fprintf(fp, " %s\n", pos->long_name);
	}
	return ret;
}

size_t __dsos__fprintf(struct list_head *head, FILE *fp)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		int i;

		for (i = 0; i < MAP__NR_TYPES; ++i)
			ret += dso__fprintf(pos, i, fp);
	}

	return ret;
}

size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
		       dso__loaded(dso, type) ? "" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);

		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}

enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	fd = dso__data_get_fd(dso, machine);
	if (fd >= 0) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}
int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as the enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = str_error_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}