/* map.c — memory-map bookkeeping for perf tooling (reconstructed header) */
  1. #include "symbol.h"
  2. #include <errno.h>
  3. #include <inttypes.h>
  4. #include <limits.h>
  5. #include <stdlib.h>
  6. #include <string.h>
  7. #include <stdio.h>
  8. #include <unistd.h>
  9. #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
  10. #include "map.h"
  11. #include "thread.h"
  12. #include "strlist.h"
  13. #include "vdso.h"
  14. #include "build-id.h"
  15. #include "util.h"
  16. #include "debug.h"
  17. #include "machine.h"
  18. #include <linux/string.h>
  19. #include "unwind.h"
/* Defined near the bottom of this file; needed by helpers above it. */
static void __maps__insert(struct maps *maps, struct map *map);

/* Human-readable names for enum map_type, used when printing map groups. */
const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
	[MAP__VARIABLE] = "Variables",
};
  25. static inline int is_anon_memory(const char *filename, u32 flags)
  26. {
  27. return flags & MAP_HUGETLB ||
  28. !strcmp(filename, "//anon") ||
  29. !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
  30. !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
  31. }
  32. static inline int is_no_dso_memory(const char *filename)
  33. {
  34. return !strncmp(filename, "[stack", 6) ||
  35. !strncmp(filename, "/SYSV",5) ||
  36. !strcmp(filename, "[heap]");
  37. }
  38. static inline int is_android_lib(const char *filename)
  39. {
  40. return !strncmp(filename, "/data/app-lib", 13) ||
  41. !strncmp(filename, "/system/lib", 11);
  42. }
  43. static inline bool replace_android_lib(const char *filename, char *newfilename)
  44. {
  45. const char *libname;
  46. char *app_abi;
  47. size_t app_abi_length, new_length;
  48. size_t lib_length = 0;
  49. libname = strrchr(filename, '/');
  50. if (libname)
  51. lib_length = strlen(libname);
  52. app_abi = getenv("APP_ABI");
  53. if (!app_abi)
  54. return false;
  55. app_abi_length = strlen(app_abi);
  56. if (!strncmp(filename, "/data/app-lib", 13)) {
  57. char *apk_path;
  58. if (!app_abi_length)
  59. return false;
  60. new_length = 7 + app_abi_length + lib_length;
  61. apk_path = getenv("APK_PATH");
  62. if (apk_path) {
  63. new_length += strlen(apk_path) + 1;
  64. if (new_length > PATH_MAX)
  65. return false;
  66. snprintf(newfilename, new_length,
  67. "%s/libs/%s/%s", apk_path, app_abi, libname);
  68. } else {
  69. if (new_length > PATH_MAX)
  70. return false;
  71. snprintf(newfilename, new_length,
  72. "libs/%s/%s", app_abi, libname);
  73. }
  74. return true;
  75. }
  76. if (!strncmp(filename, "/system/lib/", 11)) {
  77. char *ndk, *app;
  78. const char *arch;
  79. size_t ndk_length;
  80. size_t app_length;
  81. ndk = getenv("NDK_ROOT");
  82. app = getenv("APP_PLATFORM");
  83. if (!(ndk && app))
  84. return false;
  85. ndk_length = strlen(ndk);
  86. app_length = strlen(app);
  87. if (!(ndk_length && app_length && app_abi_length))
  88. return false;
  89. arch = !strncmp(app_abi, "arm", 3) ? "arm" :
  90. !strncmp(app_abi, "mips", 4) ? "mips" :
  91. !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
  92. if (!arch)
  93. return false;
  94. new_length = 27 + ndk_length +
  95. app_length + lib_length
  96. + strlen(arch);
  97. if (new_length > PATH_MAX)
  98. return false;
  99. snprintf(newfilename, new_length,
  100. "%s/platforms/%s/arch-%s/usr/lib/%s",
  101. ndk, app, arch, libname);
  102. return true;
  103. }
  104. return false;
  105. }
  106. void map__init(struct map *map, enum map_type type,
  107. u64 start, u64 end, u64 pgoff, struct dso *dso)
  108. {
  109. map->type = type;
  110. map->start = start;
  111. map->end = end;
  112. map->pgoff = pgoff;
  113. map->reloc = 0;
  114. map->dso = dso__get(dso);
  115. map->map_ip = map__map_ip;
  116. map->unmap_ip = map__unmap_ip;
  117. RB_CLEAR_NODE(&map->rb_node);
  118. map->groups = NULL;
  119. map->erange_warned = false;
  120. atomic_set(&map->refcnt, 1);
  121. }
/*
 * map__new - create a map for a mmap event.
 *
 * Classifies @filename (anonymous/huge-page memory, stack/heap/SysV,
 * vdso, Android library), possibly rewriting it, then finds or creates
 * the matching dso in @machine and initialises the map over
 * [@start, @start + @len).
 *
 * Return: a new map with refcount 1, or NULL on allocation/dso failure.
 */
struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
		     u64 ino_gen, u32 prot, u32 flags, char *filename,
		     enum map_type type, struct thread *thread)
{
	struct map *map = malloc(sizeof(*map));

	if (map != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename, flags);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		map->maj = d_maj;
		map->min = d_min;
		map->ino = ino;
		map->ino_generation = ino_gen;
		map->prot = prot;
		map->flags = flags;

		/* JITted code: symbols may come from a /tmp/perf-<pid>.map file */
		if ((anon || no_dso) && type == MAP__FUNCTION) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			/* the vdso image starts at file offset 0 regardless of pgoff */
			pgoff = 0;
			dso = machine__findnew_vdso(machine, thread);
		} else
			dso = machine__findnew_dso(machine, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(map, type, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->map_ip = map->unmap_ip = identity__map_ip;
			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (type != MAP__FUNCTION)
				dso__set_loaded(dso, map->type);
		}
		/* map__init took its own reference on dso */
		dso__put(dso);
	}
	return map;
out_delete:
	free(map);
	return NULL;
}
  175. /*
  176. * Constructor variant for modules (where we know from /proc/modules where
  177. * they are loaded) and for vmlinux, where only after we load all the
  178. * symbols we'll know where it starts and ends.
  179. */
  180. struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
  181. {
  182. struct map *map = calloc(1, (sizeof(*map) +
  183. (dso->kernel ? sizeof(struct kmap) : 0)));
  184. if (map != NULL) {
  185. /*
  186. * ->end will be filled after we load all the symbols
  187. */
  188. map__init(map, type, start, 0, 0, dso);
  189. }
  190. return map;
  191. }
  192. /*
  193. * Use this and __map__is_kmodule() for map instances that are in
  194. * machine->kmaps, and thus have map->groups->machine all properly set, to
  195. * disambiguate between the kernel and modules.
  196. *
  197. * When the need arises, introduce map__is_{kernel,kmodule)() that
  198. * checks (map->groups != NULL && map->groups->machine != NULL &&
  199. * map->dso->kernel) before calling __map__is_{kernel,kmodule}())
  200. */
  201. bool __map__is_kernel(const struct map *map)
  202. {
  203. return __machine__kernel_map(map->groups->machine, map->type) == map;
  204. }
/*
 * map__exit - release the resources held by @map without freeing it.
 * The map must already be unlinked from any rb-tree (asserted below).
 */
static void map__exit(struct map *map)
{
	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
	dso__zput(map->dso); /* drop our dso reference and NULL the pointer */
}
/* map__delete - tear down and free @map.  Callers normally use map__put(). */
void map__delete(struct map *map)
{
	map__exit(map);
	free(map);
}
/* map__put - drop one reference; frees the map when the count hits zero. */
void map__put(struct map *map)
{
	if (map && atomic_dec_and_test(&map->refcnt))
		map__delete(map);
}
  220. void map__fixup_start(struct map *map)
  221. {
  222. struct rb_root *symbols = &map->dso->symbols[map->type];
  223. struct rb_node *nd = rb_first(symbols);
  224. if (nd != NULL) {
  225. struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
  226. map->start = sym->start;
  227. }
  228. }
  229. void map__fixup_end(struct map *map)
  230. {
  231. struct rb_root *symbols = &map->dso->symbols[map->type];
  232. struct rb_node *nd = rb_last(symbols);
  233. if (nd != NULL) {
  234. struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
  235. map->end = sym->end;
  236. }
  237. }
#define DSO__DELETED "(deleted)"

/*
 * map__load - load the symbol table of map's dso, if not already loaded.
 *
 * Return: 0 on success (or when already loaded), -1 on failure after
 * warning the user about what went wrong.
 */
int map__load(struct map *map)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso, map->type))
		return 0;

	nr = dso__load(map->dso, map);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[SBUILD_ID_SIZE];

			build_id__sprintf(map->dso->build_id,
					  sizeof(map->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		/*
		 * A dso name ending in " (deleted)" means the file was
		 * replaced on disk while still mapped; its symbols are stale.
		 */
		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated (is prelink enabled?). "
				"Restart the long running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}
/*
 * Default symbol-name comparison; architectures may override this weak
 * definition.
 */
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}
/*
 * map__find_symbol - look up the symbol covering dso-relative address @addr.
 * Loads the dso's symbols on first use; returns NULL on load failure or miss.
 */
struct symbol *map__find_symbol(struct map *map, u64 addr)
{
	if (map__load(map) < 0)
		return NULL;

	return dso__find_symbol(map->dso, map->type, addr);
}
/*
 * map__find_symbol_by_name - look up a symbol in this map's dso by name.
 * Loads the symbols on first use and lazily builds the sorted-by-name
 * index the first time a name lookup is needed.
 */
struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
{
	if (map__load(map) < 0)
		return NULL;

	if (!dso__sorted_by_name(map->dso, map->type))
		dso__sort_by_name(map->dso, map->type);

	return dso__find_symbol_by_name(map->dso, map->type, name);
}
  294. struct map *map__clone(struct map *from)
  295. {
  296. struct map *map = memdup(from, sizeof(*map));
  297. if (map != NULL) {
  298. atomic_set(&map->refcnt, 1);
  299. RB_CLEAR_NODE(&map->rb_node);
  300. dso__get(map->dso);
  301. map->groups = NULL;
  302. }
  303. return map;
  304. }
  305. int map__overlap(struct map *l, struct map *r)
  306. {
  307. if (l->start > r->start) {
  308. struct map *t = l;
  309. l = r;
  310. r = t;
  311. }
  312. if (l->end > r->start)
  313. return 1;
  314. return 0;
  315. }
/* map__fprintf - print one "start-end pgoff name" line for @map to @fp. */
size_t map__fprintf(struct map *map, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map->start, map->end, map->pgoff, map->dso->name);
}
  321. size_t map__fprintf_dsoname(struct map *map, FILE *fp)
  322. {
  323. const char *dsoname = "[unknown]";
  324. if (map && map->dso && (map->dso->name || map->dso->long_name)) {
  325. if (symbol_conf.show_kernel_path && map->dso->long_name)
  326. dsoname = map->dso->long_name;
  327. else if (map->dso->name)
  328. dsoname = map->dso->name;
  329. }
  330. return fprintf(fp, "%s", dsoname);
  331. }
  332. int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
  333. FILE *fp)
  334. {
  335. char *srcline;
  336. int ret = 0;
  337. if (map && map->dso) {
  338. srcline = get_srcline(map->dso,
  339. map__rip_2objdump(map, addr), NULL, true);
  340. if (srcline != SRCLINE_UNKNOWN)
  341. ret = fprintf(fp, "%s%s", prefix, srcline);
  342. free_srcline(srcline);
  343. }
  344. return ret;
  345. }
/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	if (!map->dso->adjust_symbols)
		return rip;

	/* ET_REL: addresses are relative to the containing section */
	if (map->dso->rel)
		return rip - map->pgoff;

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return rip + map->dso->text_offset;

	return map->unmap_ip(map, rip) - map->reloc;
}
/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	if (!map->dso->adjust_symbols)
		return map->unmap_ip(map, ip);

	/* inverse of the ET_REL case in map__rip_2objdump() */
	if (map->dso->rel)
		return map->unmap_ip(map, ip + map->pgoff);

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return map->unmap_ip(map, ip - map->dso->text_offset);

	return ip + map->reloc;
}
/* maps__init - set up an empty, lockable rb-tree of maps. */
static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	pthread_rwlock_init(&maps->lock, NULL);
}
  402. void map_groups__init(struct map_groups *mg, struct machine *machine)
  403. {
  404. int i;
  405. for (i = 0; i < MAP__NR_TYPES; ++i) {
  406. maps__init(&mg->maps[i]);
  407. }
  408. mg->machine = machine;
  409. atomic_set(&mg->refcnt, 1);
  410. }
  411. static void __maps__purge(struct maps *maps)
  412. {
  413. struct rb_root *root = &maps->entries;
  414. struct rb_node *next = rb_first(root);
  415. while (next) {
  416. struct map *pos = rb_entry(next, struct map, rb_node);
  417. next = rb_next(&pos->rb_node);
  418. rb_erase_init(&pos->rb_node, root);
  419. map__put(pos);
  420. }
  421. }
/* maps__exit - empty the tree under the write lock. */
static void maps__exit(struct maps *maps)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__purge(maps);
	pthread_rwlock_unlock(&maps->lock);
}
  428. void map_groups__exit(struct map_groups *mg)
  429. {
  430. int i;
  431. for (i = 0; i < MAP__NR_TYPES; ++i)
  432. maps__exit(&mg->maps[i]);
  433. }
  434. bool map_groups__empty(struct map_groups *mg)
  435. {
  436. int i;
  437. for (i = 0; i < MAP__NR_TYPES; ++i) {
  438. if (maps__first(&mg->maps[i]))
  439. return false;
  440. }
  441. return true;
  442. }
  443. struct map_groups *map_groups__new(struct machine *machine)
  444. {
  445. struct map_groups *mg = malloc(sizeof(*mg));
  446. if (mg != NULL)
  447. map_groups__init(mg, machine);
  448. return mg;
  449. }
/* map_groups__delete - tear down and free.  Callers normally use __put(). */
void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	free(mg);
}
/* map_groups__put - drop one reference; deletes when the count hits zero. */
void map_groups__put(struct map_groups *mg)
{
	if (mg && atomic_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}
  460. struct symbol *map_groups__find_symbol(struct map_groups *mg,
  461. enum map_type type, u64 addr,
  462. struct map **mapp)
  463. {
  464. struct map *map = map_groups__find(mg, type, addr);
  465. /* Ensure map is loaded before using map->map_ip */
  466. if (map != NULL && map__load(map) >= 0) {
  467. if (mapp != NULL)
  468. *mapp = map;
  469. return map__find_symbol(map, map->map_ip(map, addr));
  470. }
  471. return NULL;
  472. }
  473. struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
  474. struct map **mapp)
  475. {
  476. struct symbol *sym;
  477. struct rb_node *nd;
  478. pthread_rwlock_rdlock(&maps->lock);
  479. for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
  480. struct map *pos = rb_entry(nd, struct map, rb_node);
  481. sym = map__find_symbol_by_name(pos, name);
  482. if (sym == NULL)
  483. continue;
  484. if (mapp != NULL)
  485. *mapp = pos;
  486. goto out;
  487. }
  488. sym = NULL;
  489. out:
  490. pthread_rwlock_unlock(&maps->lock);
  491. return sym;
  492. }
  493. struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
  494. enum map_type type,
  495. const char *name,
  496. struct map **mapp)
  497. {
  498. struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp);
  499. return sym;
  500. }
/*
 * map_groups__find_ams - fix up an addr_map_symbol whose address may have
 * drifted outside its recorded map, then resolve the symbol.
 *
 * Return: 0 when a symbol was found, -1 otherwise.
 */
int map_groups__find_ams(struct addr_map_symbol *ams)
{
	/* address no longer inside the recorded map? re-find the map */
	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
		if (ams->map->groups == NULL)
			return -1;

		ams->map = map_groups__find(ams->map->groups, ams->map->type,
					    ams->addr);
		if (ams->map == NULL)
			return -1;
	}

	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
	ams->sym = map__find_symbol(ams->map, ams->al_addr);

	return ams->sym ? 0 : -1;
}
  515. static size_t maps__fprintf(struct maps *maps, FILE *fp)
  516. {
  517. size_t printed = 0;
  518. struct rb_node *nd;
  519. pthread_rwlock_rdlock(&maps->lock);
  520. for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
  521. struct map *pos = rb_entry(nd, struct map, rb_node);
  522. printed += fprintf(fp, "Map:");
  523. printed += map__fprintf(pos, fp);
  524. if (verbose > 2) {
  525. printed += dso__fprintf(pos->dso, pos->type, fp);
  526. printed += fprintf(fp, "--\n");
  527. }
  528. }
  529. pthread_rwlock_unlock(&maps->lock);
  530. return printed;
  531. }
  532. size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
  533. FILE *fp)
  534. {
  535. size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
  536. return printed += maps__fprintf(&mg->maps[type], fp);
  537. }
  538. size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
  539. {
  540. size_t printed = 0, i;
  541. for (i = 0; i < MAP__NR_TYPES; ++i)
  542. printed += __map_groups__fprintf_maps(mg, i, fp);
  543. return printed;
  544. }
/*
 * __map_groups__insert - link @map into the tree matching its type and
 * record the owning groups.  Caller must hold the tree's write lock.
 */
static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
	__maps__insert(&mg->maps[map->type], map);
	map->groups = mg;
}
/*
 * maps__fixup_overlappings - make room in @maps for @map.
 *
 * Removes every existing map that overlaps @map's range; when an evicted
 * map sticks out before/after the new one, clones of the evicted map
 * trimmed to the non-overlapped areas are re-inserted.  Overlaps are
 * reported to @fp when verbose >= 2.
 *
 * Return: 0 on success, -ENOMEM if cloning a trimmed piece fails.
 */
static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next;
	int err = 0;

	pthread_rwlock_wrlock(&maps->lock);

	root = &maps->entries;
	next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		/* advance before we possibly erase pos from the tree */
		next = rb_next(&pos->rb_node);
		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			/* piece of pos below the new map's start */
			before->end = map->start;
			__map_groups__insert(pos->groups, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			/* piece of pos above the new map's end */
			after->start = map->end;
			__map_groups__insert(pos->groups, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos);	/* drop the reference the tree held */

		if (err)
			goto out;
	}

	err = 0;
out:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}
/* Thin wrapper: fix up overlaps in the tree matching @map's type. */
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
}
/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct thread *thread,
		      struct map_groups *parent, enum map_type type)
{
	struct map_groups *mg = thread->mg;
	int err = -ENOMEM;
	struct map *map;
	struct maps *maps = &parent->maps[type];

	pthread_rwlock_rdlock(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *new = map__clone(map);

		if (new == NULL)
			goto out_unlock;

		/* set up unwinding state for the clone in @thread */
		err = unwind__prepare_access(thread, new, NULL);
		if (err)
			goto out_unlock;

		map_groups__insert(mg, new);
		map__put(new);	/* the groups now hold the reference */
	}

	err = 0;
out_unlock:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}
/*
 * __maps__insert - link @map into @maps' rb-tree, ordered by map->start.
 * Caller must hold maps->lock for writing.  Takes a reference on @map.
 */
static void __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->entries.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	/* standard rb-tree descent to the insertion slot */
	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, &maps->entries);
	map__get(map);	/* the tree holds a reference */
}
/* maps__insert - insert @map under the write lock. */
void maps__insert(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__insert(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}
/*
 * __maps__remove - unlink @map and drop the tree's reference.
 * Caller must hold maps->lock for writing.
 */
static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);
}
/* maps__remove - remove @map under the write lock. */
void maps__remove(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__remove(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}
  673. struct map *maps__find(struct maps *maps, u64 ip)
  674. {
  675. struct rb_node **p, *parent = NULL;
  676. struct map *m;
  677. pthread_rwlock_rdlock(&maps->lock);
  678. p = &maps->entries.rb_node;
  679. while (*p != NULL) {
  680. parent = *p;
  681. m = rb_entry(parent, struct map, rb_node);
  682. if (ip < m->start)
  683. p = &(*p)->rb_left;
  684. else if (ip >= m->end)
  685. p = &(*p)->rb_right;
  686. else
  687. goto out;
  688. }
  689. m = NULL;
  690. out:
  691. pthread_rwlock_unlock(&maps->lock);
  692. return m;
  693. }
  694. struct map *maps__first(struct maps *maps)
  695. {
  696. struct rb_node *first = rb_first(&maps->entries);
  697. if (first)
  698. return rb_entry(first, struct map, rb_node);
  699. return NULL;
  700. }
  701. struct map *map__next(struct map *map)
  702. {
  703. struct rb_node *next = rb_next(&map->rb_node);
  704. if (next)
  705. return rb_entry(next, struct map, rb_node);
  706. return NULL;
  707. }
/*
 * map__kmap - access the struct kmap that trails a kernel map.
 * map__new2() allocates a struct kmap right after struct map for kernel
 * dsos; non-kernel maps have no such trailer, hence the check.
 */
struct kmap *map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel) {
		pr_err("Internal error: map__kmap with a non-kernel map\n");
		return NULL;
	}
	return (struct kmap *)(map + 1);
}
/*
 * map__kmaps - get the map_groups recorded in a kernel map's kmap area.
 * Returns NULL (with an error message) for non-kernel maps or when the
 * kmap has no groups set yet.
 */
struct map_groups *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}