libbpf.c

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/list.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#define __printf(a, b)	__attribute__((format(printf, a, b)))

__printf(1, 2)
static int __base_pr(const char *format, ...)
{
	va_list args;
	int err;

	va_start(args, format);
	err = vfprintf(stderr, format, args);
	va_end(args);
	return err;
}

static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)

void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}

#define STRERR_BUFSIZE  128

#define ERRNO_OFFSET(e)		((e) - __LIBBPF_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(LIBBPF_ERRNO__##c)
#define NR_ERRNO	(__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)

static const char *libbpf_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(LIBELF)]	= "Something wrong in libelf",
	[ERRCODE_OFFSET(FORMAT)]	= "BPF object format invalid",
	[ERRCODE_OFFSET(KVERSION)]	= "'version' section incorrect or lost",
	[ERRCODE_OFFSET(ENDIAN)]	= "Endian mismatch",
	[ERRCODE_OFFSET(INTERNAL)]	= "Internal error in libbpf",
	[ERRCODE_OFFSET(RELOC)]		= "Relocation failed",
	[ERRCODE_OFFSET(VERIFY)]	= "Kernel verifier blocks program loading",
	[ERRCODE_OFFSET(PROG2BIG)]	= "Program too big",
	[ERRCODE_OFFSET(KVER)]		= "Incorrect kernel version",
	[ERRCODE_OFFSET(PROGTYPE)]	= "Kernel doesn't support this program type",
};

int libbpf_strerror(int err, char *buf, size_t size)
{
	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err < __LIBBPF_ERRNO__START) {
		int ret;

		ret = strerror_r(err, buf, size);
		buf[size - 1] = '\0';
		return ret;
	}

	if (err < __LIBBPF_ERRNO__END) {
		const char *msg;

		msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	snprintf(buf, size, "Unknown libbpf error %d", err);
	buf[size - 1] = '\0';
	return -1;
}
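/*
 * Evaluate @action, store its result in @err and jump to @out if the
 * result is non-zero. Used to chain the open/load steps below.
 */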
#define CHECK_ERR(action, err, out) do {	\
	err = action;				\
	if (err)				\
		goto out;			\
} while (0)

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
/*
 * bpf_prog would be a better name, but that name is already used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *section_name;
	struct bpf_insn *insns;
	size_t insns_cnt;
	enum bpf_prog_type type;

	struct {
		int insn_idx;
		int map_idx;
	} *reloc_desc;
	int nr_reloc;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;
};
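/*
 * One map from the object's "maps" section: the definition parsed from
 * the ELF data, the name recovered from the symbol table, and the fd
 * once the map has been created in the kernel.
 */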
struct bpf_map {
	int fd;
	char *name;
	struct bpf_map_def def;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];
	u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	bool loaded;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;
	} efile;
	/*
	 * All loaded bpf_object structures are linked in a list, which is
	 * hidden from the caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;
	char path[];
};
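/*
 * obj->efile is only usable while the ELF handle is still open;
 * bpf_object__elf_finish() tears it down once collection is done.
 */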
#define obj_elf_valid(o)	((o)->efile.elf)

static void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warning("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);
	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->section_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}
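/*
 * Initialize a bpf_program from the raw instructions found in one
 * executable section of the ELF file. @idx is the section index, kept
 * for matching relocation sections to programs later.
 */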
static int
bpf_program__init(void *data, size_t size, char *name, int idx,
		struct bpf_program *prog)
{
	if (size < sizeof(struct bpf_insn)) {
		pr_warning("corrupted section '%s'\n", name);
		return -EINVAL;
	}

	bzero(prog, sizeof(*prog));

	prog->section_name = strdup(name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog %s\n",
			name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for %s\n", name);
		goto errout;
	}
	prog->insns_cnt = size / sizeof(struct bpf_insn);
	memcpy(prog->insns, data,
		prog->insns_cnt * sizeof(struct bpf_insn));
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_KPROBE;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, name, idx, &prog);
	if (err)
		return err;

	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so there is no need for special
		 * treatment in bpf_object__close().
		 */
		pr_warning("failed to alloc a new program '%s'\n",
			name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					void *obj_buf,
					size_t obj_buf_sz)
{
	struct bpf_object *obj;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;
	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. Otherwise, we would have to duplicate
	 * the buffer to avoid the user freeing it before ELF handling
	 * is finished.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
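/*
 * Open the ELF either from the caller-provided buffer or from the file
 * at obj->path, then check that it is a relocatable eBPF object
 * (e_machine may be EM_NONE for objects built by old LLVM).
 */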
static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			pr_warning("failed to open %s: %s\n", obj->path,
				strerror(errno));
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					LIBBPF_ELF_C_READ_MMAP,
					NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
			obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
			obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
		pr_warning("%s is not an eBPF object file\n",
			obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int
bpf_object__check_endianness(struct bpf_object *obj)
{
	static unsigned int const endian = 1;

	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		/* We are big endian, BPF obj is little endian. */
		if (*(unsigned char const *)&endian != 1)
			goto mismatch;
		break;

	case ELFDATA2MSB:
		/* We are little endian, BPF obj is big endian. */
		if (*(unsigned char const *)&endian != 0)
			goto mismatch;
		break;
	default:
		return -LIBBPF_ERRNO__ENDIAN;
	}

	return 0;

mismatch:
	pr_warning("Error: endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj,
			void *data, size_t size)
{
	memcpy(obj->license, data,
		min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj,
			void *data, size_t size)
{
	u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path,
		obj->kern_version);
	return 0;
}

static int
bpf_object__init_maps(struct bpf_object *obj, void *data,
		size_t size)
{
	size_t nr_maps;
	int i;

	nr_maps = size / sizeof(struct bpf_map_def);
	if (!data || !nr_maps) {
		pr_debug("%s doesn't need map definition\n",
			obj->path);
		return 0;
	}

	pr_debug("maps in %s: %zd bytes\n", obj->path, size);

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	for (i = 0; i < nr_maps; i++) {
		struct bpf_map_def *def = &obj->maps[i].def;
		/*
		 * Fill all fds with -1 so we won't close an incorrect
		 * fd (fd=0 is stdin) on failure (zclose won't close
		 * a negative fd).
		 */
		obj->maps[i].fd = -1;

		/* Save map definition into obj->maps */
		*def = ((struct bpf_map_def *)data)[i];
	}
	return 0;
}
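/*
 * Map names are not stored in the "maps" section itself; they are
 * recovered from symbols pointing into that section. A symbol's value
 * divided by sizeof(struct bpf_map_def) gives its index in obj->maps.
 */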
static int
bpf_object__init_maps_name(struct bpf_object *obj)
{
	int i;
	Elf_Data *symbols = obj->efile.symbols;

	if (!symbols || obj->efile.maps_shndx < 0)
		return -EINVAL;

	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		size_t map_idx;
		const char *map_name;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map_name = elf_strptr(obj->efile.elf,
				obj->efile.strtabidx,
				sym.st_name);
		map_idx = sym.st_value / sizeof(struct bpf_map_def);
		if (map_idx >= obj->nr_maps) {
			pr_warning("index of map \"%s\" is buggy: %zu > %zu\n",
				map_name, map_idx, obj->nr_maps);
			continue;
		}
		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %zu is \"%s\"\n", map_idx,
			obj->maps[map_idx].name);
	}

	return 0;
}
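/*
 * Walk all ELF sections: "license", "version" and "maps" are parsed
 * directly, the symbol table is remembered, executable PROGBITS
 * sections become bpf_programs, and SHT_REL sections are collected
 * for the relocation pass.
 */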
static int bpf_object__elf_collect(struct bpf_object *obj)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n",
			obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section header from %s\n",
				obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section name from %s\n",
				obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section data from %s(%s)\n",
				name, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}
		pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
			name, (unsigned long)data->d_size,
			(int)sh.sh_link, (unsigned long)sh.sh_flags,
			(int)sh.sh_type);

		if (strcmp(name, "license") == 0)
			err = bpf_object__init_license(obj,
						data->d_buf,
						data->d_size);
		else if (strcmp(name, "version") == 0)
			err = bpf_object__init_kversion(obj,
						data->d_buf,
						data->d_size);
		else if (strcmp(name, "maps") == 0) {
			err = bpf_object__init_maps(obj, data->d_buf,
						data->d_size);
			obj->efile.maps_shndx = idx;
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warning("bpf: multiple SYMTAB in %s\n",
					obj->path);
				err = -LIBBPF_ERRNO__FORMAT;
			} else {
				obj->efile.symbols = data;
				obj->efile.strtabidx = sh.sh_link;
			}
		} else if ((sh.sh_type == SHT_PROGBITS) &&
			   (sh.sh_flags & SHF_EXECINSTR) &&
			   (data->d_size > 0)) {
			err = bpf_object__add_program(obj, data->d_buf,
						data->d_size, name, idx);
			if (err) {
				char errmsg[STRERR_BUFSIZE];

				strerror_r(-err, errmsg, sizeof(errmsg));
				pr_warning("failed to alloc program %s (%s): %s\n",
					name, obj->path, errmsg);
			}
		} else if (sh.sh_type == SHT_REL) {
			void *reloc = obj->efile.reloc;
			int nr_reloc = obj->efile.nr_reloc + 1;

			reloc = realloc(reloc,
				sizeof(*obj->efile.reloc) * nr_reloc);
			if (!reloc) {
				pr_warning("realloc failed\n");
				err = -ENOMEM;
			} else {
				int n = nr_reloc - 1;

				obj->efile.reloc = reloc;
				obj->efile.nr_reloc = nr_reloc;

				obj->efile.reloc[n].shdr = sh;
				obj->efile.reloc[n].data = data;
			}
		}
		if (err)
			goto out;
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
		pr_warning("Corrupted ELF file: index of strtab invalid\n");
		return -LIBBPF_ERRNO__FORMAT;
	}

	if (obj->efile.maps_shndx >= 0)
		err = bpf_object__init_maps_name(obj);
out:
	return err;
}

static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}
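/*
 * Record, for every relocation entry, which instruction refers to
 * which map. The map fd is patched into the instruction later by
 * bpf_program__relocate(), once the maps have been created.
 */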
static int
bpf_program__collect_reloc(struct bpf_program *prog,
			size_t nr_maps, GElf_Shdr *shdr,
			Elf_Data *data, Elf_Data *symbols,
			int maps_shndx)
{
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols,
				GELF_R_SYM(rel.r_info),
				&sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sym.st_shndx != maps_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		map_idx = sym.st_value / sizeof(struct bpf_map_def);
		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d larger than %d\n",
				(int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}

static int
bpf_object__create_maps(struct bpf_object *obj)
{
	unsigned int i;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map_def *def = &obj->maps[i].def;
		int *pfd = &obj->maps[i].fd;

		*pfd = bpf_create_map(def->type,
				def->key_size,
				def->value_size,
				def->max_entries);
		if (*pfd < 0) {
			size_t j;
			int err = *pfd;

			pr_warning("failed to create map: %s\n",
				strerror(errno));
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}
		pr_debug("create map: fd=%d\n", *pfd);
	}

	return 0;
}
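/*
 * Patch the BPF_LD_IMM64 instructions recorded during relocation
 * collection: mark them as map loads (BPF_PSEUDO_MAP_FD) and store the
 * fd of the referenced map in the immediate field.
 */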
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i;

	if (!prog || !prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		int insn_idx, map_idx;
		struct bpf_insn *insns = prog->insns;

		insn_idx = prog->reloc_desc[i].insn_idx;
		map_idx = prog->reloc_desc[i].map_idx;

		if (insn_idx >= (int)prog->insns_cnt) {
			pr_warning("relocation out of range: '%s'\n",
				prog->section_name);
			return -LIBBPF_ERRNO__RELOC;
		}
		insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
		insns[insn_idx].imm = obj->maps[map_idx].fd;
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}

static int
bpf_object__relocate(struct bpf_object *obj)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warning("failed to relocate '%s'\n",
				prog->section_name);
			return err;
		}
	}
	return 0;
}

static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info;
		struct bpf_program *prog;
		size_t nr_maps = obj->nr_maps;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no %d section\n",
				idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog, nr_maps,
					shdr, data,
					obj->efile.symbols,
					obj->efile.maps_shndx);
		if (err)
			return err;
	}
	return 0;
}
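/*
 * Wrapper around bpf_load_program() that tries to classify a failure:
 * verifier rejection (with log dump), program too large, wrong program
 * type, or kernel version mismatch.
 */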
static int
load_program(enum bpf_prog_type type, struct bpf_insn *insns,
	int insns_cnt, char *license, u32 kern_version, int *pfd)
{
	int ret;
	char *log_buf;

	if (!insns || !insns_cnt)
		return -EINVAL;

	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Failed to alloc log buffer for bpf loader, continuing without log\n");
	ret = bpf_load_program(type, insns, insns_cnt, license,
			kern_version, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%d insns), at most %d insns\n",
			insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			fd = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
					insns_cnt, license, kern_version,
					NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
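/*
 * Load a single program, or every instance of it when a preprocessor
 * has been installed with bpf_program__set_prep(). The original
 * instructions are freed once loading has been attempted.
 */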
static int
bpf_program__load(struct bpf_program *prog,
		char *license, u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->insns, prog->insns_cnt,
				license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, result.new_insn_ptr,
				result.new_insn_cnt,
				license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			prog->section_name);
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}

static int
bpf_object__load_progs(struct bpf_object *obj)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

static int bpf_object__validate(struct bpf_object *obj)
{
	if (obj->kern_version == 0) {
		pr_warning("%s doesn't provide kernel version\n",
			obj->path);
		return -LIBBPF_ERRNO__KVERSION;
	}
	return 0;
}
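/*
 * Common open path for bpf_object__open() and bpf_object__open_buffer():
 * parse the ELF, collect programs, maps and relocations, then release
 * the ELF state. Loading into the kernel is done separately by
 * bpf_object__load().
 */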
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}

struct bpf_object *bpf_object__open(const char *path)
{
	/* param validation */
	if (!path)
		return NULL;

	pr_debug("loading %s\n", path);

	return __bpf_object__open(path, NULL, 0);
}

struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					size_t obj_buf_sz,
					const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			(unsigned long)obj_buf,
			(unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		name);

	return __bpf_object__open(name, obj_buf, obj_buf_sz);
}

int bpf_object__unload(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return -EINVAL;

	for (i = 0; i < obj->nr_maps; i++)
		zclose(obj->maps[i].fd);

	for (i = 0; i < obj->nr_programs; i++)
		bpf_program__unload(&obj->programs[i]);

	return 0;
}

int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}

void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}

struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);
	/* An empty list is detected here, so there is no need to check on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}

const char *bpf_object__name(struct bpf_object *obj)
{
	return obj ? obj->path : ERR_PTR(-EINVAL);
}

unsigned int bpf_object__kversion(struct bpf_object *obj)
{
	return obj ? obj->kern_version : 0;
}

struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	size_t idx;

	if (!obj->programs)
		return NULL;
	/* First handler */
	if (prev == NULL)
		return &obj->programs[0];

	if (prev->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (prev - obj->programs) + 1;
	if (idx >= obj->nr_programs)
		return NULL;
	return &obj->programs[idx];
}

int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}

void *bpf_program__priv(struct bpf_program *prog)
{
	return prog ? prog->priv : ERR_PTR(-EINVAL);
}

const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warning("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}

int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}

int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warning("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warning("alloc memory failed for fds\n");
		return -ENOMEM;
	}

	/* fill all fd with -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}

int bpf_program__nth_fd(struct bpf_program *prog, int n)
{
	int fd;

	if (n >= prog->instances.nr || n < 0) {
		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
			n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warning("%dth instance of program '%s' is invalid\n",
			n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}

static void bpf_program__set_type(struct bpf_program *prog,
				enum bpf_prog_type type)
{
	prog->type = type;
}

int bpf_program__set_tracepoint(struct bpf_program *prog)
{
	if (!prog)
		return -EINVAL;
	bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
	return 0;
}

int bpf_program__set_kprobe(struct bpf_program *prog)
{
	if (!prog)
		return -EINVAL;
	bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
	return 0;
}

static bool bpf_program__is_type(struct bpf_program *prog,
				enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

bool bpf_program__is_tracepoint(struct bpf_program *prog)
{
	return bpf_program__is_type(prog, BPF_PROG_TYPE_TRACEPOINT);
}

bool bpf_program__is_kprobe(struct bpf_program *prog)
{
	return bpf_program__is_type(prog, BPF_PROG_TYPE_KPROBE);
}

int bpf_map__fd(struct bpf_map *map)
{
	return map ? map->fd : -EINVAL;
}

const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
{
	return map ? &map->def : ERR_PTR(-EINVAL);
}

const char *bpf_map__name(struct bpf_map *map)
{
	return map ? map->name : NULL;
}

int bpf_map__set_priv(struct bpf_map *map, void *priv,
		bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->priv) {
		if (map->clear_priv)
			map->clear_priv(map, map->priv);
	}

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

void *bpf_map__priv(struct bpf_map *map)
{
	return map ? map->priv : ERR_PTR(-EINVAL);
}

struct bpf_map *
bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
{
	size_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if (prev == NULL)
		return s;

	if ((prev < s) || (prev >= e)) {
		pr_warning("error in %s: map handler doesn't belong to object\n",
			__func__);
		return NULL;
	}

	idx = (prev - obj->maps) + 1;
	if (idx >= obj->nr_maps)
		return NULL;

	return &obj->maps[idx];
}

struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_map__for_each(pos, obj) {
		if (pos->name && !strcmp(pos->name, name))
			return pos;
	}
	return NULL;
}