dso.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386
  1. #include <asm/bug.h>
  2. #include <sys/time.h>
  3. #include <sys/resource.h>
  4. #include "symbol.h"
  5. #include "dso.h"
  6. #include "machine.h"
  7. #include "auxtrace.h"
  8. #include "util.h"
  9. #include "debug.h"
  10. #include "vdso.h"
  11. char dso__symtab_origin(const struct dso *dso)
  12. {
  13. static const char origin[] = {
  14. [DSO_BINARY_TYPE__KALLSYMS] = 'k',
  15. [DSO_BINARY_TYPE__VMLINUX] = 'v',
  16. [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
  17. [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
  18. [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
  19. [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
  20. [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
  21. [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
  22. [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
  23. [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
  24. [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
  25. [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
  26. [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
  27. [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
  28. [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
  29. [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
  30. };
  31. if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
  32. return '!';
  33. return origin[dso->symtab_type];
  34. }
/*
 * Build the on-disk filename where a binary of the given @type for @dso
 * should be found, writing at most @size bytes into @filename.
 *
 * @root_dir is prepended for guest-machine module paths; @size is the
 * capacity of @filename.  Returns 0 on success, -1 when the type has no
 * filename (kallsyms, JIT, not-found) or the lookup fails.
 */
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK: {
		char *debuglink;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		/*
		 * Walk back to the last '/' so the .gnu_debuglink name read
		 * from the ELF file replaces only the basename portion.
		 */
		debuglink = filename + len;
		while (debuglink != filename && *debuglink != '/')
			debuglink--;
		if (*debuglink == '/')
			debuglink++;

		ret = -1;
		if (!is_regular_file(filename))
			break;

		ret = filename__read_debuglink(filename, debuglink,
					       size - (debuglink - filename));
		}
		break;
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size) == NULL)
			ret = -1;
		break;
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		/* Fedora: /usr/lib/debug/<path>.debug */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		/* Ubuntu: /usr/lib/debug/<path> (no .debug suffix) */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		/* OpenEmbedded: <dir>/.debug/<basename> */
		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		/* directory part plus trailing '/' and NUL */
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s",  dso->long_name);
		len += scnprintf(filename + len , size - len, ".debug%s",
								last_slash);
		break;
	}
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		/* .build-id layout: first 2 hex chars are the directory */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		/* Guest modules live under the guest machine's root dir. */
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		/* kcore paths are used as-is, no symfs prefix. */
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		/* These types have no backing file to name. */
		ret = -1;
		break;
	}

	return ret;
}
/*
 * Table of supported kernel-module compression formats: file suffix
 * plus the decompressor used by decompress_to_file().  NULL-terminated.
 */
static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};
  140. bool is_supported_compression(const char *ext)
  141. {
  142. unsigned i;
  143. for (i = 0; compressions[i].fmt; i++) {
  144. if (!strcmp(ext, compressions[i].fmt))
  145. return true;
  146. }
  147. return false;
  148. }
/*
 * Decide whether @pathname refers to a kernel module, given the sample's
 * @cpumode (which must already be masked with PERF_RECORD_MISC_CPUMODE_MASK).
 */
bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		/* User-space / hypervisor code is never a kernel module. */
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		/* On parse failure, conservatively assume it is a module. */
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
					pathname);
			return true;
		}
	}

	/* Only reached via the default branch with a successful parse. */
	return m.kmod;
}
  171. bool decompress_to_file(const char *ext, const char *filename, int output_fd)
  172. {
  173. unsigned i;
  174. for (i = 0; compressions[i].fmt; i++) {
  175. if (!strcmp(ext, compressions[i].fmt))
  176. return !compressions[i].decompress(filename,
  177. output_fd);
  178. }
  179. return false;
  180. }
  181. bool dso__needs_decompress(struct dso *dso)
  182. {
  183. return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
  184. dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
  185. }
/*
 * Parses kernel module specified in @path and updates
 * @m argument like:
 *
 *    @comp - true if @path contains supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains '.ko' suffix in right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *            of the kernel module without suffixes, otherwise strdup-ed
 *            base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains strdup-ed string
 *            the compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * name should belong to kernel itself, not kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;
		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	if (is_supported_compression(ext + 1)) {
		m->comp = true;
		/*
		 * Step back over the ".ko" expected right before the
		 * compression suffix, e.g. "foo.ko.gz".
		 * NOTE(review): assumes at least 3 chars precede the
		 * compression suffix; for a path like "x.gz" this would
		 * point before the start of the name -- verify callers
		 * never pass such paths.
		 */
		ext -= 3;
	}

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			/* Module names are reported bracketed: "[name]". */
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		/* Module names use '_' where file names may have '-'. */
		strxfrchar(m->name, '-', '_');
	}

	if (alloc_ext && m->comp) {
		/* ext points at ".ko"; the compression suffix follows it. */
		m->ext = strdup(ext + 4);
		if (!m->ext) {
			free((void *) m->name);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Global list of open DSOs and the counter.
 * Both are protected by dso__data_open_lock; the list is kept in open
 * order so the head is the oldest (first to evict on fd pressure).
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
  268. static void dso__list_add(struct dso *dso)
  269. {
  270. list_add_tail(&dso->data.open_entry, &dso__data_open);
  271. dso__data_open_cnt++;
  272. }
/* Remove @dso from the open-fd list and decrement the global counter. */
static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	/* Warn (once) before decrementing if the counter would underflow. */
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}
  280. static void close_first_dso(void);
  281. static int do_open(char *name)
  282. {
  283. int fd;
  284. char sbuf[STRERR_BUFSIZE];
  285. do {
  286. fd = open(name, O_RDONLY);
  287. if (fd >= 0)
  288. return fd;
  289. pr_debug("dso open failed: %s\n",
  290. str_error_r(errno, sbuf, sizeof(sbuf)));
  291. if (!dso__data_open_cnt || errno != EMFILE)
  292. break;
  293. close_first_dso();
  294. } while (1);
  295. return -1;
  296. }
  297. static int __open_dso(struct dso *dso, struct machine *machine)
  298. {
  299. int fd;
  300. char *root_dir = (char *)"";
  301. char *name = malloc(PATH_MAX);
  302. if (!name)
  303. return -ENOMEM;
  304. if (machine)
  305. root_dir = machine->root_dir;
  306. if (dso__read_binary_type_filename(dso, dso->binary_type,
  307. root_dir, name, PATH_MAX)) {
  308. free(name);
  309. return -EINVAL;
  310. }
  311. if (!is_regular_file(name))
  312. return -EINVAL;
  313. fd = do_open(name);
  314. free(name);
  315. return fd;
  316. }
static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Open @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd = __open_dso(dso, machine);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}
  338. static void close_data_fd(struct dso *dso)
  339. {
  340. if (dso->data.fd >= 0) {
  341. close(dso->data.fd);
  342. dso->data.fd = -1;
  343. dso->data.file_size = 0;
  344. dso__list_del(dso);
  345. }
  346. }
/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}
  358. static void close_first_dso(void)
  359. {
  360. struct dso *dso;
  361. dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
  362. close_dso(dso);
  363. }
  364. static rlim_t get_fd_limit(void)
  365. {
  366. struct rlimit l;
  367. rlim_t limit = 0;
  368. /* Allow half of the current open fd limit. */
  369. if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
  370. if (l.rlim_cur == RLIM_INFINITY)
  371. limit = l.rlim_cur;
  372. else
  373. limit = l.rlim_cur / 2;
  374. } else {
  375. pr_err("failed to get fd limit\n");
  376. limit = 1;
  377. }
  378. return limit;
  379. }
/* Cached fd limit; 0 means "not yet computed" (see may_cache_fd()). */
static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I dont expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}
  390. static bool may_cache_fd(void)
  391. {
  392. if (!fd_limit)
  393. fd_limit = get_fd_limit();
  394. if (fd_limit == RLIM_INFINITY)
  395. return true;
  396. return fd_limit > (rlim_t) dso__data_open_cnt;
  397. }
  398. /*
  399. * Check and close LRU dso if we crossed allowed limit
  400. * for opened dso file descriptors. The limit is half
  401. * of the RLIMIT_NOFILE files opened.
  402. */
  403. static void check_data_close(void)
  404. {
  405. bool cache_fd = may_cache_fd();
  406. if (!cache_fd)
  407. close_first_dso();
  408. }
/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 * Takes the global open-DSO lock around the close.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}
/*
 * Ensure @dso has an open data fd, trying the build-id cache first, then
 * the system path, if no binary type was determined yet.  Updates
 * dso->data.status accordingly.  Caller must hold dso__data_open_lock.
 */
static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	/* Fallback order when the binary type is not yet known. */
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	/* If the type was already resolved, reuse it directly. */
	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}
  447. /**
  448. * dso__data_get_fd - Get dso's data file descriptor
  449. * @dso: dso object
  450. * @machine: machine object
  451. *
  452. * External interface to find dso's file, open it and
  453. * returns file descriptor. It should be paired with
  454. * dso__data_put_fd() if it returns non-negative value.
  455. */
  456. int dso__data_get_fd(struct dso *dso, struct machine *machine)
  457. {
  458. if (dso->data.status == DSO_DATA_STATUS_ERROR)
  459. return -1;
  460. if (pthread_mutex_lock(&dso__data_open_lock) < 0)
  461. return -1;
  462. try_to_open_dso(dso, machine);
  463. if (dso->data.fd < 0)
  464. pthread_mutex_unlock(&dso__data_open_lock);
  465. return dso->data.fd;
  466. }
/**
 * dso__data_put_fd - Release the fd obtained via dso__data_get_fd()
 * @dso: dso object (unused; the lock is global)
 *
 * Drops the global open-DSO lock taken by a successful dso__data_get_fd().
 */
void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
  471. bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
  472. {
  473. u32 flag = 1 << by;
  474. if (dso->data.status_seen & flag)
  475. return true;
  476. dso->data.status_seen |= flag;
  477. return false;
  478. }
  479. static void
  480. dso_cache__free(struct dso *dso)
  481. {
  482. struct rb_root *root = &dso->data.cache;
  483. struct rb_node *next = rb_first(root);
  484. pthread_mutex_lock(&dso->lock);
  485. while (next) {
  486. struct dso_cache *cache;
  487. cache = rb_entry(next, struct dso_cache, rb_node);
  488. next = rb_next(&cache->rb_node);
  489. rb_erase(&cache->rb_node, root);
  490. free(cache);
  491. }
  492. pthread_mutex_unlock(&dso->lock);
  493. }
  494. static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
  495. {
  496. const struct rb_root *root = &dso->data.cache;
  497. struct rb_node * const *p = &root->rb_node;
  498. const struct rb_node *parent = NULL;
  499. struct dso_cache *cache;
  500. while (*p != NULL) {
  501. u64 end;
  502. parent = *p;
  503. cache = rb_entry(parent, struct dso_cache, rb_node);
  504. end = cache->offset + DSO__DATA_CACHE_SIZE;
  505. if (offset < cache->offset)
  506. p = &(*p)->rb_left;
  507. else if (offset >= end)
  508. p = &(*p)->rb_right;
  509. else
  510. return cache;
  511. }
  512. return NULL;
  513. }
/*
 * Insert @new into @dso's chunk cache.  If a chunk covering the same
 * offset already exists (another thread won the race), the existing
 * chunk is returned and @new is NOT inserted; returns NULL when @new
 * was inserted.
 */
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			/* Existing chunk covers @offset: report the clash. */
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	/* NULL signals a successful insert. */
	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}
  542. static ssize_t
  543. dso_cache__memcpy(struct dso_cache *cache, u64 offset,
  544. u8 *data, u64 size)
  545. {
  546. u64 cache_offset = offset - cache->offset;
  547. u64 cache_size = min(cache->size - cache_offset, size);
  548. memcpy(data, cache->data + cache_offset, cache_size);
  549. return cache_size;
  550. }
/*
 * Read one DSO__DATA_CACHE_SIZE-aligned chunk covering @offset from the
 * dso's file, insert it into the chunk cache, and copy the requested
 * bytes into @data.  Returns bytes copied, 0 on EOF, or a negative errno.
 */
static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	/* do { } while (0): a break below skips to the unlock. */
	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if other thread opened another
		 * file (dso) due to open file limit (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		/* Align the read to a cache-chunk boundary. */
		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size   = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lose the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	/* On error or EOF the freshly allocated chunk is discarded. */
	if (ret <= 0)
		free(cache);

	return ret;
}
  595. static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
  596. u64 offset, u8 *data, ssize_t size)
  597. {
  598. struct dso_cache *cache;
  599. cache = dso_cache__find(dso, offset);
  600. if (cache)
  601. return dso_cache__memcpy(cache, offset, data, size);
  602. else
  603. return dso_cache__read(dso, machine, offset, data, size);
  604. }
/*
 * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks
 * in the rb_tree. Any read to already cached data is served
 * by cached data.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	/* Loop chunk-by-chunk until @size bytes are gathered or EOF. */
	do {
		ssize_t ret;

		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r      += ret;
		p      += ret;
		offset += ret;
		size   -= ret;

	} while (size);

	return r;
}
/*
 * Populate dso->data.file_size (via fstat) if not already known.
 * Returns 0 on success, a negative errno / -1 on failure; failures also
 * mark the dso's data status as ERROR.
 */
static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	/* Already cached. */
	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}
/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size, or -1 when the backing file cannot be stat'ed.
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}
  677. static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
  678. u64 offset, u8 *data, ssize_t size)
  679. {
  680. if (data_file_size(dso, machine))
  681. return -1;
  682. /* Check the offset sanity. */
  683. if (offset > dso->data.file_size)
  684. return -1;
  685. if (offset + size < offset)
  686. return -1;
  687. return cached_read(dso, machine, offset, data, size);
  688. }
/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso file offset. Open
 * dso data file and use cached_read to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	/* Don't retry a dso whose data already failed to open. */
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}
/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map used to translate the address to a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	/* Translate the virtual address into a file offset via the map. */
	u64 offset = map->map_ip(map, addr);

	return dso__data_read_offset(dso, machine, offset, data, size);
}
/*
 * Allocate a new dso for @name and wrap it in a new FUNCTION map.
 * Returns NULL when the dso cannot be created.
 *
 * NOTE(review): dso__new() returns a reference and map__new2()
 * presumably takes its own; check whether the local reference should be
 * dropped (dso__put) before returning to avoid a refcount leak.
 */
struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso, MAP__FUNCTION);

	return map;
}
/*
 * Find or create the kernel dso named @name on @machine and stamp it
 * with @short_name and the kernel @dso_type.
 */
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}
/*
 * Find a matching entry and/or link current entry to RB tree.
 * Either one of the dso or name parameter must be non-NULL or the
 * function will not work.
 *
 * Returns the matching dso when found; NULL when @dso was inserted,
 * when nothing matched, or when a true duplicate (same long AND short
 * name) was detected.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node  *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print an one-time warning & put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this;	/* Find matching dso */
			/*
			 * The core kernel DSOs may have duplicated long name.
			 * In this case, the short name should be different.
			 * Comparing the short names to differentiate the DSOs.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
		/* Remember the owning tree for later removal. */
		dso->root = root;
	}
	return NULL;
}
/* Lookup-only wrapper: a NULL dso prevents any insertion into the tree. */
static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}
/*
 * Replace @dso's long name.  If the dso is linked into a by-longname
 * rb tree, it is unlinked, renamed, and re-linked under the new key.
 * @name_allocated records whether the dso now owns @name's storage.
 */
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	/* Free the previous name only if the dso owned it. */
	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dso__findlink_by_longname() isn't guaranteed to add it
		 * back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dso__findlink_by_longname(root, dso, NULL);
}
  827. void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
  828. {
  829. if (name == NULL)
  830. return;
  831. if (dso->short_name_allocated)
  832. free((char *)dso->short_name);
  833. dso->short_name = name;
  834. dso->short_name_len = strlen(name);
  835. dso->short_name_allocated = name_allocated;
  836. }
/* Set the dso's short name to the basename of its long name. */
static void dso__set_basename(struct dso *dso)
{
	/*
	 * basename() may modify path buffer, so we must pass
	 * a copy.
	 */
	char *base, *lname = strdup(dso->long_name);

	if (!lname)
		return;

	/*
	 * basename() may return a pointer to internal
	 * storage which is reused in subsequent calls
	 * so copy the result.
	 */
	base = strdup(basename(lname));

	free(lname);

	if (!base)
		return;

	/* true: the dso takes ownership of the strdup'd base. */
	dso__set_short_name(dso, base, true);
}
  857. int dso__name_len(const struct dso *dso)
  858. {
  859. if (!dso)
  860. return strlen("[unknown]");
  861. if (verbose)
  862. return dso->long_name_len;
  863. return dso->short_name_len;
  864. }
  865. bool dso__loaded(const struct dso *dso, enum map_type type)
  866. {
  867. return dso->loaded & (1 << type);
  868. }
  869. bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
  870. {
  871. return dso->sorted_by_name & (1 << type);
  872. }
  873. void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
  874. {
  875. dso->sorted_by_name |= (1 << type);
  876. }
/*
 * Allocate and initialize a dso named @name. The name is copied into the
 * flexible array at the end of the struct, so a single allocation covers
 * both; long and short names initially alias that embedded copy (not
 * allocated, so they are not freed on rename). The new dso starts with a
 * reference count of 1 — release with dso__put().
 * Returns NULL on allocation failure.
 */
struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;

		strcpy(dso->name, name);
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		/* Empty symbol trees for every map type. */
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->data.fd = -1;	/* backing file not opened yet */
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		/* Default to the host's word size until the file says otherwise. */
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);	/* not indexed by long name yet */
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		atomic_set(&dso->refcnt, 1);
	}

	return dso;
}
/*
 * Free @dso and everything it owns: per-type symbol trees, owned name
 * strings, cached file data, auxtrace cache and addr2line state.
 * Normally reached via dso__put() when the last reference drops; a dso
 * still linked in the long-name rb tree at this point indicates a
 * refcounting bug, which is reported but not fatal.
 */
void dso__delete(struct dso *dso)
{
	int i;

	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);

	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	/* Only free the names the dso owns (see dso__set_*_name()). */
	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}
  934. struct dso *dso__get(struct dso *dso)
  935. {
  936. if (dso)
  937. atomic_inc(&dso->refcnt);
  938. return dso;
  939. }
  940. void dso__put(struct dso *dso)
  941. {
  942. if (dso && atomic_dec_and_test(&dso->refcnt))
  943. dso__delete(dso);
  944. }
  945. void dso__set_build_id(struct dso *dso, void *build_id)
  946. {
  947. memcpy(dso->build_id, build_id, sizeof(dso->build_id));
  948. dso->has_build_id = 1;
  949. }
  950. bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
  951. {
  952. return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
  953. }
  954. void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
  955. {
  956. char path[PATH_MAX];
  957. if (machine__is_default_guest(machine))
  958. return;
  959. sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
  960. if (sysfs__read_build_id(path, dso->build_id,
  961. sizeof(dso->build_id)) == 0)
  962. dso->has_build_id = true;
  963. }
  964. int dso__kernel_module_get_build_id(struct dso *dso,
  965. const char *root_dir)
  966. {
  967. char filename[PATH_MAX];
  968. /*
  969. * kernel module short names are of the form "[module]" and
  970. * we need just "module" here.
  971. */
  972. const char *name = dso->short_name + 1;
  973. snprintf(filename, sizeof(filename),
  974. "%s/sys/module/%.*s/notes/.note.gnu.build-id",
  975. root_dir, (int)strlen(name) - 1, name);
  976. if (sysfs__read_build_id(filename, dso->build_id,
  977. sizeof(dso->build_id)) == 0)
  978. dso->has_build_id = true;
  979. return 0;
  980. }
  981. bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
  982. {
  983. bool have_build_id = false;
  984. struct dso *pos;
  985. list_for_each_entry(pos, head, node) {
  986. if (with_hits && !pos->hit && !dso__is_vdso(pos))
  987. continue;
  988. if (pos->has_build_id) {
  989. have_build_id = true;
  990. continue;
  991. }
  992. if (filename__read_build_id(pos->long_name, pos->build_id,
  993. sizeof(pos->build_id)) > 0) {
  994. have_build_id = true;
  995. pos->has_build_id = true;
  996. }
  997. }
  998. return have_build_id;
  999. }
/*
 * Link @dso into @dsos: appended to the list and inserted into the
 * long-name rb tree index. Caller must hold dsos->lock for writing.
 */
void __dsos__add(struct dsos *dsos, struct dso *dso)
{
	list_add_tail(&dso->node, &dsos->head);
	__dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, grab a reference, then garbage collect
	 * this when needing memory, by looking at LRU dso instances in the
	 * list with atomic_read(&dso->refcnt) == 1, i.e. no references
	 * anywhere besides the one for the list, do, under a lock for the
	 * list: remove it from the list, then a dso__put(), that probably will
	 * be the last and will then call dso__delete(), end of life.
	 *
	 * That, or at the end of the 'struct machine' lifetime, when all
	 * 'struct dso' instances will be removed from the list, in
	 * dsos__exit(), if they have no other reference from some other data
	 * structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map', 'dso' structs all from 'struct
	 * hist_entry' instances, but we may not need anything not referenced,
	 * so we might as well call machines__exit()/machines__delete() and
	 * garbage collect it.
	 */
	dso__get(dso);
}
  1026. void dsos__add(struct dsos *dsos, struct dso *dso)
  1027. {
  1028. pthread_rwlock_wrlock(&dsos->lock);
  1029. __dsos__add(dsos, dso);
  1030. pthread_rwlock_unlock(&dsos->lock);
  1031. }
  1032. struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
  1033. {
  1034. struct dso *pos;
  1035. if (cmp_short) {
  1036. list_for_each_entry(pos, &dsos->head, node)
  1037. if (strcmp(pos->short_name, name) == 0)
  1038. return pos;
  1039. return NULL;
  1040. }
  1041. return __dso__find_by_longname(&dsos->root, name);
  1042. }
  1043. struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
  1044. {
  1045. struct dso *dso;
  1046. pthread_rwlock_rdlock(&dsos->lock);
  1047. dso = __dsos__find(dsos, name, cmp_short);
  1048. pthread_rwlock_unlock(&dsos->lock);
  1049. return dso;
  1050. }
  1051. struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
  1052. {
  1053. struct dso *dso = dso__new(name);
  1054. if (dso != NULL) {
  1055. __dsos__add(dsos, dso);
  1056. dso__set_basename(dso);
  1057. /* Put dso here because __dsos_add already got it */
  1058. dso__put(dso);
  1059. }
  1060. return dso;
  1061. }
  1062. struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
  1063. {
  1064. struct dso *dso = __dsos__find(dsos, name, false);
  1065. return dso ? dso : __dsos__addnew(dsos, name);
  1066. }
  1067. struct dso *dsos__findnew(struct dsos *dsos, const char *name)
  1068. {
  1069. struct dso *dso;
  1070. pthread_rwlock_wrlock(&dsos->lock);
  1071. dso = dso__get(__dsos__findnew(dsos, name));
  1072. pthread_rwlock_unlock(&dsos->lock);
  1073. return dso;
  1074. }
  1075. size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
  1076. bool (skip)(struct dso *dso, int parm), int parm)
  1077. {
  1078. struct dso *pos;
  1079. size_t ret = 0;
  1080. list_for_each_entry(pos, head, node) {
  1081. if (skip && skip(pos, parm))
  1082. continue;
  1083. ret += dso__fprintf_buildid(pos, fp);
  1084. ret += fprintf(fp, " %s\n", pos->long_name);
  1085. }
  1086. return ret;
  1087. }
  1088. size_t __dsos__fprintf(struct list_head *head, FILE *fp)
  1089. {
  1090. struct dso *pos;
  1091. size_t ret = 0;
  1092. list_for_each_entry(pos, head, node) {
  1093. int i;
  1094. for (i = 0; i < MAP__NR_TYPES; ++i)
  1095. ret += dso__fprintf(pos, i, fp);
  1096. }
  1097. return ret;
  1098. }
  1099. size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
  1100. {
  1101. char sbuild_id[SBUILD_ID_SIZE];
  1102. build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
  1103. return fprintf(fp, "%s", sbuild_id);
  1104. }
  1105. size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
  1106. {
  1107. struct rb_node *nd;
  1108. size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
  1109. if (dso->short_name != dso->long_name)
  1110. ret += fprintf(fp, "%s, ", dso->long_name);
  1111. ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
  1112. dso__loaded(dso, type) ? "" : "NOT ");
  1113. ret += dso__fprintf_buildid(dso, fp);
  1114. ret += fprintf(fp, ")\n");
  1115. for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
  1116. struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
  1117. ret += symbol__fprintf(pos, fp);
  1118. }
  1119. return ret;
  1120. }
  1121. enum dso_type dso__type(struct dso *dso, struct machine *machine)
  1122. {
  1123. int fd;
  1124. enum dso_type type = DSO__TYPE_UNKNOWN;
  1125. fd = dso__data_get_fd(dso, machine);
  1126. if (fd >= 0) {
  1127. type = dso__type_fd(fd);
  1128. dso__data_put_fd(dso);
  1129. }
  1130. return type;
  1131. }
/*
 * Format a human-readable message for dso->load_errno into @buf.
 * Non-negative values are treated as plain errno codes; negative values in
 * the [__DSO_LOAD_ERRNO__START, __DSO_LOAD_ERRNO__END) range index the
 * table below. Returns 0 on success, -1 when the code is out of range.
 */
int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have a same ordering as the enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		/* str_error_r() may return a static string; copy it into @buf. */
		const char *err = str_error_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}