/* (removed: web-viewer line-number gutter residue that preceded the file body) */
  1. /*
  2. * Common eBPF ELF object loading operations.
  3. *
  4. * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  5. * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  6. * Copyright (C) 2015 Huawei Inc.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation;
  11. * version 2.1 of the License (not later!)
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with this program; if not, see <http://www.gnu.org/licenses>
  20. */
  21. #include <stdlib.h>
  22. #include <stdio.h>
  23. #include <stdarg.h>
  24. #include <inttypes.h>
  25. #include <string.h>
  26. #include <unistd.h>
  27. #include <fcntl.h>
  28. #include <errno.h>
  29. #include <asm/unistd.h>
  30. #include <linux/kernel.h>
  31. #include <linux/bpf.h>
  32. #include <linux/list.h>
  33. #include <libelf.h>
  34. #include <gelf.h>
  35. #include "libbpf.h"
  36. #include "bpf.h"
  37. #ifndef EM_BPF
  38. #define EM_BPF 247
  39. #endif
  40. #define __printf(a, b) __attribute__((format(printf, a, b)))
  41. __printf(1, 2)
  42. static int __base_pr(const char *format, ...)
  43. {
  44. va_list args;
  45. int err;
  46. va_start(args, format);
  47. err = vfprintf(stderr, format, args);
  48. va_end(args);
  49. return err;
  50. }
/* Active print handlers; warning/info default to stderr, debug is off. */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Emit through the given handler, if set; all messages get a "libbpf: " prefix. */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)

/*
 * Install caller-supplied print callbacks for the three severity
 * levels. Passing NULL for a level silences it.
 */
void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}
  70. #define STRERR_BUFSIZE 128
  71. #define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
  72. #define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
  73. #define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
  74. static const char *libbpf_strerror_table[NR_ERRNO] = {
  75. [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
  76. [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
  77. [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
  78. [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
  79. [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
  80. [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
  81. [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
  82. [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
  83. [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
  84. [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
  85. };
  86. int libbpf_strerror(int err, char *buf, size_t size)
  87. {
  88. if (!buf || !size)
  89. return -1;
  90. err = err > 0 ? err : -err;
  91. if (err < __LIBBPF_ERRNO__START) {
  92. int ret;
  93. ret = strerror_r(err, buf, size);
  94. buf[size - 1] = '\0';
  95. return ret;
  96. }
  97. if (err < __LIBBPF_ERRNO__END) {
  98. const char *msg;
  99. msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
  100. snprintf(buf, size, "%s", msg);
  101. buf[size - 1] = '\0';
  102. return 0;
  103. }
  104. snprintf(buf, size, "Unknown libbpf error %d", err);
  105. buf[size - 1] = '\0';
  106. return -1;
  107. }
/* Run @action, store its result in @err, and jump to @out on failure. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)

/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* Free *ptr and poison it with NULL to guard against double free. */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/*
 * Close fd only if it is valid (>= 0), then reset it to -1.
 * Evaluates to close()'s return value (0 when fd was already invalid).
 */
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-based ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *section_name;		/* owned copy of the ELF section name */
	struct bpf_insn *insns;		/* owned copy of the section's instructions */
	size_t insns_cnt;
	enum bpf_prog_type type;

	/* One entry per relocation collected for this program. */
	struct {
		int insn_idx;
		int map_idx;
	} *reloc_desc;
	int nr_reloc;

	/*
	 * Loaded kernel instances of this program; nr == -1 means
	 * the program has never been loaded.
	 */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;		/* back pointer to the owning object */
	void *priv;
	bpf_program_clear_priv_t clear_priv;
};

struct bpf_map {
	int fd;				/* -1 until the map is created in the kernel */
	char *name;			/* owned copy of the map symbol name */
	size_t offset;			/* symbol value within the "maps" section */
	struct bpf_map_def def;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};
/* Global registry of every live bpf_object (see struct bpf_object::list). */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];
	u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-owned in-memory image, if any */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;		/* SHT_REL sections seen during collect */
		int nr_reloc;
		int maps_shndx;		/* index of the "maps" section, -1 if none */
	} efile;

	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	char path[];			/* flexible array holding the object path */
};
/* True while the ELF side of the object is open (between init and finish). */
#define obj_elf_valid(o)	((o)->efile.elf)
  202. static void bpf_program__unload(struct bpf_program *prog)
  203. {
  204. int i;
  205. if (!prog)
  206. return;
  207. /*
  208. * If the object is opened but the program was never loaded,
  209. * it is possible that prog->instances.nr == -1.
  210. */
  211. if (prog->instances.nr > 0) {
  212. for (i = 0; i < prog->instances.nr; i++)
  213. zclose(prog->instances.fds[i]);
  214. } else if (prog->instances.nr != -1) {
  215. pr_warning("Internal error: instances.nr is %d\n",
  216. prog->instances.nr);
  217. }
  218. prog->instances.nr = -1;
  219. zfree(&prog->instances.fds);
  220. }
  221. static void bpf_program__exit(struct bpf_program *prog)
  222. {
  223. if (!prog)
  224. return;
  225. if (prog->clear_priv)
  226. prog->clear_priv(prog, prog->priv);
  227. prog->priv = NULL;
  228. prog->clear_priv = NULL;
  229. bpf_program__unload(prog);
  230. zfree(&prog->section_name);
  231. zfree(&prog->insns);
  232. zfree(&prog->reloc_desc);
  233. prog->nr_reloc = 0;
  234. prog->insns_cnt = 0;
  235. prog->idx = -1;
  236. }
  237. static int
  238. bpf_program__init(void *data, size_t size, char *name, int idx,
  239. struct bpf_program *prog)
  240. {
  241. if (size < sizeof(struct bpf_insn)) {
  242. pr_warning("corrupted section '%s'\n", name);
  243. return -EINVAL;
  244. }
  245. bzero(prog, sizeof(*prog));
  246. prog->section_name = strdup(name);
  247. if (!prog->section_name) {
  248. pr_warning("failed to alloc name for prog %s\n",
  249. name);
  250. goto errout;
  251. }
  252. prog->insns = malloc(size);
  253. if (!prog->insns) {
  254. pr_warning("failed to alloc insns for %s\n", name);
  255. goto errout;
  256. }
  257. prog->insns_cnt = size / sizeof(struct bpf_insn);
  258. memcpy(prog->insns, data,
  259. prog->insns_cnt * sizeof(struct bpf_insn));
  260. prog->idx = idx;
  261. prog->instances.fds = NULL;
  262. prog->instances.nr = -1;
  263. prog->type = BPF_PROG_TYPE_KPROBE;
  264. return 0;
  265. errout:
  266. bpf_program__exit(prog);
  267. return -ENOMEM;
  268. }
  269. static int
  270. bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
  271. char *name, int idx)
  272. {
  273. struct bpf_program prog, *progs;
  274. int nr_progs, err;
  275. err = bpf_program__init(data, size, name, idx, &prog);
  276. if (err)
  277. return err;
  278. progs = obj->programs;
  279. nr_progs = obj->nr_programs;
  280. progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
  281. if (!progs) {
  282. /*
  283. * In this case the original obj->programs
  284. * is still valid, so don't need special treat for
  285. * bpf_close_object().
  286. */
  287. pr_warning("failed to alloc a new program '%s'\n",
  288. name);
  289. bpf_program__exit(&prog);
  290. return -ENOMEM;
  291. }
  292. pr_debug("found program %s\n", prog.section_name);
  293. obj->programs = progs;
  294. obj->nr_programs = nr_progs + 1;
  295. prog.obj = obj;
  296. progs[nr_progs] = prog;
  297. return 0;
  298. }
  299. static struct bpf_object *bpf_object__new(const char *path,
  300. void *obj_buf,
  301. size_t obj_buf_sz)
  302. {
  303. struct bpf_object *obj;
  304. obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
  305. if (!obj) {
  306. pr_warning("alloc memory failed for %s\n", path);
  307. return ERR_PTR(-ENOMEM);
  308. }
  309. strcpy(obj->path, path);
  310. obj->efile.fd = -1;
  311. /*
  312. * Caller of this function should also calls
  313. * bpf_object__elf_finish() after data collection to return
  314. * obj_buf to user. If not, we should duplicate the buffer to
  315. * avoid user freeing them before elf finish.
  316. */
  317. obj->efile.obj_buf = obj_buf;
  318. obj->efile.obj_buf_sz = obj_buf_sz;
  319. obj->efile.maps_shndx = -1;
  320. obj->loaded = false;
  321. INIT_LIST_HEAD(&obj->list);
  322. list_add(&obj->list, &bpf_objects_list);
  323. return obj;
  324. }
  325. static void bpf_object__elf_finish(struct bpf_object *obj)
  326. {
  327. if (!obj_elf_valid(obj))
  328. return;
  329. if (obj->efile.elf) {
  330. elf_end(obj->efile.elf);
  331. obj->efile.elf = NULL;
  332. }
  333. obj->efile.symbols = NULL;
  334. zfree(&obj->efile.reloc);
  335. obj->efile.nr_reloc = 0;
  336. zclose(obj->efile.fd);
  337. obj->efile.obj_buf = NULL;
  338. obj->efile.obj_buf_sz = 0;
  339. }
  340. static int bpf_object__elf_init(struct bpf_object *obj)
  341. {
  342. int err = 0;
  343. GElf_Ehdr *ep;
  344. if (obj_elf_valid(obj)) {
  345. pr_warning("elf init: internal error\n");
  346. return -LIBBPF_ERRNO__LIBELF;
  347. }
  348. if (obj->efile.obj_buf_sz > 0) {
  349. /*
  350. * obj_buf should have been validated by
  351. * bpf_object__open_buffer().
  352. */
  353. obj->efile.elf = elf_memory(obj->efile.obj_buf,
  354. obj->efile.obj_buf_sz);
  355. } else {
  356. obj->efile.fd = open(obj->path, O_RDONLY);
  357. if (obj->efile.fd < 0) {
  358. pr_warning("failed to open %s: %s\n", obj->path,
  359. strerror(errno));
  360. return -errno;
  361. }
  362. obj->efile.elf = elf_begin(obj->efile.fd,
  363. LIBBPF_ELF_C_READ_MMAP,
  364. NULL);
  365. }
  366. if (!obj->efile.elf) {
  367. pr_warning("failed to open %s as ELF file\n",
  368. obj->path);
  369. err = -LIBBPF_ERRNO__LIBELF;
  370. goto errout;
  371. }
  372. if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
  373. pr_warning("failed to get EHDR from %s\n",
  374. obj->path);
  375. err = -LIBBPF_ERRNO__FORMAT;
  376. goto errout;
  377. }
  378. ep = &obj->efile.ehdr;
  379. /* Old LLVM set e_machine to EM_NONE */
  380. if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
  381. pr_warning("%s is not an eBPF object file\n",
  382. obj->path);
  383. err = -LIBBPF_ERRNO__FORMAT;
  384. goto errout;
  385. }
  386. return 0;
  387. errout:
  388. bpf_object__elf_finish(obj);
  389. return err;
  390. }
  391. static int
  392. bpf_object__check_endianness(struct bpf_object *obj)
  393. {
  394. static unsigned int const endian = 1;
  395. switch (obj->efile.ehdr.e_ident[EI_DATA]) {
  396. case ELFDATA2LSB:
  397. /* We are big endian, BPF obj is little endian. */
  398. if (*(unsigned char const *)&endian != 1)
  399. goto mismatch;
  400. break;
  401. case ELFDATA2MSB:
  402. /* We are little endian, BPF obj is big endian. */
  403. if (*(unsigned char const *)&endian != 0)
  404. goto mismatch;
  405. break;
  406. default:
  407. return -LIBBPF_ERRNO__ENDIAN;
  408. }
  409. return 0;
  410. mismatch:
  411. pr_warning("Error: endianness mismatch.\n");
  412. return -LIBBPF_ERRNO__ENDIAN;
  413. }
  414. static int
  415. bpf_object__init_license(struct bpf_object *obj,
  416. void *data, size_t size)
  417. {
  418. memcpy(obj->license, data,
  419. min(size, sizeof(obj->license) - 1));
  420. pr_debug("license of %s is %s\n", obj->path, obj->license);
  421. return 0;
  422. }
  423. static int
  424. bpf_object__init_kversion(struct bpf_object *obj,
  425. void *data, size_t size)
  426. {
  427. u32 kver;
  428. if (size != sizeof(kver)) {
  429. pr_warning("invalid kver section in %s\n", obj->path);
  430. return -LIBBPF_ERRNO__FORMAT;
  431. }
  432. memcpy(&kver, data, sizeof(kver));
  433. obj->kern_version = kver;
  434. pr_debug("kernel version of %s is %x\n", obj->path,
  435. obj->kern_version);
  436. return 0;
  437. }
  438. static int
  439. bpf_object__validate_maps(struct bpf_object *obj)
  440. {
  441. int i;
  442. /*
  443. * If there's only 1 map, the only error case should have been
  444. * catched in bpf_object__init_maps().
  445. */
  446. if (!obj->maps || !obj->nr_maps || (obj->nr_maps == 1))
  447. return 0;
  448. for (i = 1; i < obj->nr_maps; i++) {
  449. const struct bpf_map *a = &obj->maps[i - 1];
  450. const struct bpf_map *b = &obj->maps[i];
  451. if (b->offset - a->offset < sizeof(struct bpf_map_def)) {
  452. pr_warning("corrupted map section in %s: map \"%s\" too small\n",
  453. obj->path, a->name);
  454. return -EINVAL;
  455. }
  456. }
  457. return 0;
  458. }
  459. static int compare_bpf_map(const void *_a, const void *_b)
  460. {
  461. const struct bpf_map *a = _a;
  462. const struct bpf_map *b = _b;
  463. return a->offset - b->offset;
  464. }
/*
 * Build obj->maps from the "maps" ELF section: one bpf_map per symbol
 * whose section index is maps_shndx. Pass 1 counts the symbols, pass 2
 * copies name/offset/definition; the array is then sorted by offset
 * and validated. Returns 0 (also when there are no maps) or a
 * negative errno.
 */
static int
bpf_object__init_maps(struct bpf_object *obj)
{
	int i, map_idx, nr_maps = 0;
	Elf_Scn *scn;
	Elf_Data *data;
	Elf_Data *symbols = obj->efile.symbols;

	if (obj->efile.maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	/* When !scn, short-circuit below keeps uninitialized 'data' unread. */
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	/* Alloc obj->maps and fill nr_maps. */
	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
		 nr_maps, data->d_size);
	if (!nr_maps)
		return 0;

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	/*
	 * fill all fd with -1 so won't close incorrect
	 * fd (fd=0 is stdin) when failure (zclose won't close
	 * negative fd)).
	 */
	for (i = 0; i < nr_maps; i++)
		obj->maps[i].fd = -1;

	/*
	 * Fill obj->maps using data in "maps" section.
	 */
	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		/*
		 * NOTE(review): elf_strptr() may return NULL for a bad
		 * strtab index; map_name then feeds %s / strdup below —
		 * confirm whether callers guarantee a valid strtab.
		 */
		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		obj->maps[map_idx].offset = sym.st_value;
		/* The whole bpf_map_def must lie inside the section. */
		if (sym.st_value + sizeof(struct bpf_map_def) > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
		/* The map definition is embedded verbatim in the section. */
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		obj->maps[map_idx].def = *def;
		map_idx++;
	}

	/* Sort by offset so validation can compare adjacent pairs. */
	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
	return bpf_object__validate_maps(obj);
}
  551. static int bpf_object__elf_collect(struct bpf_object *obj)
  552. {
  553. Elf *elf = obj->efile.elf;
  554. GElf_Ehdr *ep = &obj->efile.ehdr;
  555. Elf_Scn *scn = NULL;
  556. int idx = 0, err = 0;
  557. /* Elf is corrupted/truncated, avoid calling elf_strptr. */
  558. if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
  559. pr_warning("failed to get e_shstrndx from %s\n",
  560. obj->path);
  561. return -LIBBPF_ERRNO__FORMAT;
  562. }
  563. while ((scn = elf_nextscn(elf, scn)) != NULL) {
  564. char *name;
  565. GElf_Shdr sh;
  566. Elf_Data *data;
  567. idx++;
  568. if (gelf_getshdr(scn, &sh) != &sh) {
  569. pr_warning("failed to get section header from %s\n",
  570. obj->path);
  571. err = -LIBBPF_ERRNO__FORMAT;
  572. goto out;
  573. }
  574. name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
  575. if (!name) {
  576. pr_warning("failed to get section name from %s\n",
  577. obj->path);
  578. err = -LIBBPF_ERRNO__FORMAT;
  579. goto out;
  580. }
  581. data = elf_getdata(scn, 0);
  582. if (!data) {
  583. pr_warning("failed to get section data from %s(%s)\n",
  584. name, obj->path);
  585. err = -LIBBPF_ERRNO__FORMAT;
  586. goto out;
  587. }
  588. pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
  589. name, (unsigned long)data->d_size,
  590. (int)sh.sh_link, (unsigned long)sh.sh_flags,
  591. (int)sh.sh_type);
  592. if (strcmp(name, "license") == 0)
  593. err = bpf_object__init_license(obj,
  594. data->d_buf,
  595. data->d_size);
  596. else if (strcmp(name, "version") == 0)
  597. err = bpf_object__init_kversion(obj,
  598. data->d_buf,
  599. data->d_size);
  600. else if (strcmp(name, "maps") == 0)
  601. obj->efile.maps_shndx = idx;
  602. else if (sh.sh_type == SHT_SYMTAB) {
  603. if (obj->efile.symbols) {
  604. pr_warning("bpf: multiple SYMTAB in %s\n",
  605. obj->path);
  606. err = -LIBBPF_ERRNO__FORMAT;
  607. } else {
  608. obj->efile.symbols = data;
  609. obj->efile.strtabidx = sh.sh_link;
  610. }
  611. } else if ((sh.sh_type == SHT_PROGBITS) &&
  612. (sh.sh_flags & SHF_EXECINSTR) &&
  613. (data->d_size > 0)) {
  614. err = bpf_object__add_program(obj, data->d_buf,
  615. data->d_size, name, idx);
  616. if (err) {
  617. char errmsg[STRERR_BUFSIZE];
  618. strerror_r(-err, errmsg, sizeof(errmsg));
  619. pr_warning("failed to alloc program %s (%s): %s",
  620. name, obj->path, errmsg);
  621. }
  622. } else if (sh.sh_type == SHT_REL) {
  623. void *reloc = obj->efile.reloc;
  624. int nr_reloc = obj->efile.nr_reloc + 1;
  625. reloc = realloc(reloc,
  626. sizeof(*obj->efile.reloc) * nr_reloc);
  627. if (!reloc) {
  628. pr_warning("realloc failed\n");
  629. err = -ENOMEM;
  630. } else {
  631. int n = nr_reloc - 1;
  632. obj->efile.reloc = reloc;
  633. obj->efile.nr_reloc = nr_reloc;
  634. obj->efile.reloc[n].shdr = sh;
  635. obj->efile.reloc[n].data = data;
  636. }
  637. }
  638. if (err)
  639. goto out;
  640. }
  641. if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
  642. pr_warning("Corrupted ELF file: index of strtab invalid\n");
  643. return LIBBPF_ERRNO__FORMAT;
  644. }
  645. if (obj->efile.maps_shndx >= 0)
  646. err = bpf_object__init_maps(obj);
  647. out:
  648. return err;
  649. }
  650. static struct bpf_program *
  651. bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
  652. {
  653. struct bpf_program *prog;
  654. size_t i;
  655. for (i = 0; i < obj->nr_programs; i++) {
  656. prog = &obj->programs[i];
  657. if (prog->idx == idx)
  658. return prog;
  659. }
  660. return NULL;
  661. }
  662. static int
  663. bpf_program__collect_reloc(struct bpf_program *prog,
  664. size_t nr_maps, GElf_Shdr *shdr,
  665. Elf_Data *data, Elf_Data *symbols,
  666. int maps_shndx, struct bpf_map *maps)
  667. {
  668. int i, nrels;
  669. pr_debug("collecting relocating info for: '%s'\n",
  670. prog->section_name);
  671. nrels = shdr->sh_size / shdr->sh_entsize;
  672. prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
  673. if (!prog->reloc_desc) {
  674. pr_warning("failed to alloc memory in relocation\n");
  675. return -ENOMEM;
  676. }
  677. prog->nr_reloc = nrels;
  678. for (i = 0; i < nrels; i++) {
  679. GElf_Sym sym;
  680. GElf_Rel rel;
  681. unsigned int insn_idx;
  682. struct bpf_insn *insns = prog->insns;
  683. size_t map_idx;
  684. if (!gelf_getrel(data, i, &rel)) {
  685. pr_warning("relocation: failed to get %d reloc\n", i);
  686. return -LIBBPF_ERRNO__FORMAT;
  687. }
  688. if (!gelf_getsym(symbols,
  689. GELF_R_SYM(rel.r_info),
  690. &sym)) {
  691. pr_warning("relocation: symbol %"PRIx64" not found\n",
  692. GELF_R_SYM(rel.r_info));
  693. return -LIBBPF_ERRNO__FORMAT;
  694. }
  695. if (sym.st_shndx != maps_shndx) {
  696. pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
  697. prog->section_name, sym.st_shndx);
  698. return -LIBBPF_ERRNO__RELOC;
  699. }
  700. insn_idx = rel.r_offset / sizeof(struct bpf_insn);
  701. pr_debug("relocation: insn_idx=%u\n", insn_idx);
  702. if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
  703. pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
  704. insn_idx, insns[insn_idx].code);
  705. return -LIBBPF_ERRNO__RELOC;
  706. }
  707. /* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
  708. for (map_idx = 0; map_idx < nr_maps; map_idx++) {
  709. if (maps[map_idx].offset == sym.st_value) {
  710. pr_debug("relocation: find map %zd (%s) for insn %u\n",
  711. map_idx, maps[map_idx].name, insn_idx);
  712. break;
  713. }
  714. }
  715. if (map_idx >= nr_maps) {
  716. pr_warning("bpf relocation: map_idx %d large than %d\n",
  717. (int)map_idx, (int)nr_maps - 1);
  718. return -LIBBPF_ERRNO__RELOC;
  719. }
  720. prog->reloc_desc[i].insn_idx = insn_idx;
  721. prog->reloc_desc[i].map_idx = map_idx;
  722. }
  723. return 0;
  724. }
  725. static int
  726. bpf_object__create_maps(struct bpf_object *obj)
  727. {
  728. unsigned int i;
  729. for (i = 0; i < obj->nr_maps; i++) {
  730. struct bpf_map_def *def = &obj->maps[i].def;
  731. int *pfd = &obj->maps[i].fd;
  732. *pfd = bpf_create_map(def->type,
  733. def->key_size,
  734. def->value_size,
  735. def->max_entries,
  736. 0);
  737. if (*pfd < 0) {
  738. size_t j;
  739. int err = *pfd;
  740. pr_warning("failed to create map: %s\n",
  741. strerror(errno));
  742. for (j = 0; j < i; j++)
  743. zclose(obj->maps[j].fd);
  744. return err;
  745. }
  746. pr_debug("create map %s: fd=%d\n", obj->maps[i].name, *pfd);
  747. }
  748. return 0;
  749. }
  750. static int
  751. bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
  752. {
  753. int i;
  754. if (!prog || !prog->reloc_desc)
  755. return 0;
  756. for (i = 0; i < prog->nr_reloc; i++) {
  757. int insn_idx, map_idx;
  758. struct bpf_insn *insns = prog->insns;
  759. insn_idx = prog->reloc_desc[i].insn_idx;
  760. map_idx = prog->reloc_desc[i].map_idx;
  761. if (insn_idx >= (int)prog->insns_cnt) {
  762. pr_warning("relocation out of range: '%s'\n",
  763. prog->section_name);
  764. return -LIBBPF_ERRNO__RELOC;
  765. }
  766. insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
  767. insns[insn_idx].imm = obj->maps[map_idx].fd;
  768. }
  769. zfree(&prog->reloc_desc);
  770. prog->nr_reloc = 0;
  771. return 0;
  772. }
  773. static int
  774. bpf_object__relocate(struct bpf_object *obj)
  775. {
  776. struct bpf_program *prog;
  777. size_t i;
  778. int err;
  779. for (i = 0; i < obj->nr_programs; i++) {
  780. prog = &obj->programs[i];
  781. err = bpf_program__relocate(prog, obj);
  782. if (err) {
  783. pr_warning("failed to relocate '%s'\n",
  784. prog->section_name);
  785. return err;
  786. }
  787. }
  788. return 0;
  789. }
  790. static int bpf_object__collect_reloc(struct bpf_object *obj)
  791. {
  792. int i, err;
  793. if (!obj_elf_valid(obj)) {
  794. pr_warning("Internal error: elf object is closed\n");
  795. return -LIBBPF_ERRNO__INTERNAL;
  796. }
  797. for (i = 0; i < obj->efile.nr_reloc; i++) {
  798. GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
  799. Elf_Data *data = obj->efile.reloc[i].data;
  800. int idx = shdr->sh_info;
  801. struct bpf_program *prog;
  802. size_t nr_maps = obj->nr_maps;
  803. if (shdr->sh_type != SHT_REL) {
  804. pr_warning("internal error at %d\n", __LINE__);
  805. return -LIBBPF_ERRNO__INTERNAL;
  806. }
  807. prog = bpf_object__find_prog_by_idx(obj, idx);
  808. if (!prog) {
  809. pr_warning("relocation failed: no %d section\n",
  810. idx);
  811. return -LIBBPF_ERRNO__RELOC;
  812. }
  813. err = bpf_program__collect_reloc(prog, nr_maps,
  814. shdr, data,
  815. obj->efile.symbols,
  816. obj->efile.maps_shndx,
  817. obj->maps);
  818. if (err)
  819. return err;
  820. }
  821. return 0;
  822. }
/*
 * Load one program into the kernel via bpf_load_program(), capturing
 * the verifier log to classify failures into libbpf error codes
 * (VERIFY, PROG2BIG, PROGTYPE, KVER, or generic LOAD).
 * On success stores the new program fd in *pfd and returns 0.
 */
static int
load_program(enum bpf_prog_type type, struct bpf_insn *insns,
	     int insns_cnt, char *license, u32 kern_version, int *pfd)
{
	int ret;
	char *log_buf;

	if (!insns || !insns_cnt)
		return -EINVAL;

	/* Loading continues without a log buffer if allocation fails. */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program(type, insns, insns_cnt, license,
			       kern_version, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	/* Failure: try to narrow the generic LOAD error down. */
	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		/* The verifier produced output: it rejected the program. */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%d insns), at most %d insns\n",
			   insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/*
			 * Probe load as KPROBE without a log: if that
			 * succeeds, the requested type was the problem.
			 */
			fd = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
					      insns_cnt, license, kern_version,
					      NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		/*
		 * Empty log despite a usable buffer: presumably a
		 * kernel version mismatch — TODO confirm heuristic.
		 */
		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
/*
 * Load all instances of @prog into the kernel.  Without a preprocessor
 * a single default instance is created and loaded from prog->insns;
 * with one, the preprocessor rewrites the instructions per instance and
 * may skip instances.  The original instruction buffer is always freed
 * afterwards, so a program cannot be loaded twice.
 */
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		/* No fd array yet: set up the single default instance. */
		if (prog->preprocessor) {
			/* bpf_program__set_prep() allocates the array, so
			 * a preprocessor without one is a libbpf bug. */
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}
		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* Plain case: load the raw insns as instance 0. */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* NULL insns / zero count from the preprocessor means
		 * "skip this instance"; record -1 so nth_fd() rejects it. */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);
		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* Hand the fd back through the preprocessor's slot too. */
		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* The instruction buffer is single-use: free it regardless of
	 * the outcome. */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
  941. static int
  942. bpf_object__load_progs(struct bpf_object *obj)
  943. {
  944. size_t i;
  945. int err;
  946. for (i = 0; i < obj->nr_programs; i++) {
  947. err = bpf_program__load(&obj->programs[i],
  948. obj->license,
  949. obj->kern_version);
  950. if (err)
  951. return err;
  952. }
  953. return 0;
  954. }
  955. static int bpf_object__validate(struct bpf_object *obj)
  956. {
  957. if (obj->kern_version == 0) {
  958. pr_warning("%s doesn't provide kernel version\n",
  959. obj->path);
  960. return -LIBBPF_ERRNO__KVERSION;
  961. }
  962. return 0;
  963. }
/*
 * Common open path for file-backed and buffer-backed objects: allocate
 * the object, parse the ELF, collect programs/maps/relocations, then
 * drop the libelf state.  Returns the new object or an ERR_PTR()-encoded
 * negative error.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* CHECK_ERR() jumps to "out" with err set on failure; the
	 * order matters (collect before relocations, then validate). */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj), err, out);

	/* Everything needed has been copied out of the ELF by now. */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
  987. struct bpf_object *bpf_object__open(const char *path)
  988. {
  989. /* param validation */
  990. if (!path)
  991. return NULL;
  992. pr_debug("loading %s\n", path);
  993. return __bpf_object__open(path, NULL, 0);
  994. }
  995. struct bpf_object *bpf_object__open_buffer(void *obj_buf,
  996. size_t obj_buf_sz,
  997. const char *name)
  998. {
  999. char tmp_name[64];
  1000. /* param validation */
  1001. if (!obj_buf || obj_buf_sz <= 0)
  1002. return NULL;
  1003. if (!name) {
  1004. snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
  1005. (unsigned long)obj_buf,
  1006. (unsigned long)obj_buf_sz);
  1007. tmp_name[sizeof(tmp_name) - 1] = '\0';
  1008. name = tmp_name;
  1009. }
  1010. pr_debug("loading object '%s' from buffer\n",
  1011. name);
  1012. return __bpf_object__open(name, obj_buf, obj_buf_sz);
  1013. }
  1014. int bpf_object__unload(struct bpf_object *obj)
  1015. {
  1016. size_t i;
  1017. if (!obj)
  1018. return -EINVAL;
  1019. for (i = 0; i < obj->nr_maps; i++)
  1020. zclose(obj->maps[i].fd);
  1021. for (i = 0; i < obj->nr_programs; i++)
  1022. bpf_program__unload(&obj->programs[i]);
  1023. return 0;
  1024. }
/*
 * Load @obj's maps and programs into the kernel.  May only be called
 * once per object; on failure everything loaded so far is torn down.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	/* Maps must exist before relocation can patch their fds into
	 * the program instructions; programs load last. */
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
/*
 * Destroy @obj: run private-data destructors, release libelf state and
 * kernel fds, free all per-map and per-program storage, and unlink the
 * object from the global list.  Safe to call with NULL.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	/* Object-level private data destructor runs first, while the
	 * object is still fully intact. */
	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		/* Each map may carry its own private data destructor. */
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* Detach from the global bpf_objects_list before freeing. */
	list_del(&obj->list);
	free(obj);
}
  1071. struct bpf_object *
  1072. bpf_object__next(struct bpf_object *prev)
  1073. {
  1074. struct bpf_object *next;
  1075. if (!prev)
  1076. next = list_first_entry(&bpf_objects_list,
  1077. struct bpf_object,
  1078. list);
  1079. else
  1080. next = list_next_entry(prev, list);
  1081. /* Empty list is noticed here so don't need checking on entry. */
  1082. if (&next->list == &bpf_objects_list)
  1083. return NULL;
  1084. return next;
  1085. }
  1086. const char *bpf_object__name(struct bpf_object *obj)
  1087. {
  1088. return obj ? obj->path : ERR_PTR(-EINVAL);
  1089. }
  1090. unsigned int bpf_object__kversion(struct bpf_object *obj)
  1091. {
  1092. return obj ? obj->kern_version : 0;
  1093. }
  1094. int bpf_object__set_priv(struct bpf_object *obj, void *priv,
  1095. bpf_object_clear_priv_t clear_priv)
  1096. {
  1097. if (obj->priv && obj->clear_priv)
  1098. obj->clear_priv(obj, obj->priv);
  1099. obj->priv = priv;
  1100. obj->clear_priv = clear_priv;
  1101. return 0;
  1102. }
  1103. void *bpf_object__priv(struct bpf_object *obj)
  1104. {
  1105. return obj ? obj->priv : ERR_PTR(-EINVAL);
  1106. }
  1107. struct bpf_program *
  1108. bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
  1109. {
  1110. size_t idx;
  1111. if (!obj->programs)
  1112. return NULL;
  1113. /* First handler */
  1114. if (prev == NULL)
  1115. return &obj->programs[0];
  1116. if (prev->obj != obj) {
  1117. pr_warning("error: program handler doesn't match object\n");
  1118. return NULL;
  1119. }
  1120. idx = (prev - obj->programs) + 1;
  1121. if (idx >= obj->nr_programs)
  1122. return NULL;
  1123. return &obj->programs[idx];
  1124. }
  1125. int bpf_program__set_priv(struct bpf_program *prog, void *priv,
  1126. bpf_program_clear_priv_t clear_priv)
  1127. {
  1128. if (prog->priv && prog->clear_priv)
  1129. prog->clear_priv(prog, prog->priv);
  1130. prog->priv = priv;
  1131. prog->clear_priv = clear_priv;
  1132. return 0;
  1133. }
  1134. void *bpf_program__priv(struct bpf_program *prog)
  1135. {
  1136. return prog ? prog->priv : ERR_PTR(-EINVAL);
  1137. }
  1138. const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
  1139. {
  1140. const char *title;
  1141. title = prog->section_name;
  1142. if (needs_copy) {
  1143. title = strdup(title);
  1144. if (!title) {
  1145. pr_warning("failed to strdup program title\n");
  1146. return ERR_PTR(-ENOMEM);
  1147. }
  1148. }
  1149. return title;
  1150. }
  1151. int bpf_program__fd(struct bpf_program *prog)
  1152. {
  1153. return bpf_program__nth_fd(prog, 0);
  1154. }
  1155. int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
  1156. bpf_program_prep_t prep)
  1157. {
  1158. int *instances_fds;
  1159. if (nr_instances <= 0 || !prep)
  1160. return -EINVAL;
  1161. if (prog->instances.nr > 0 || prog->instances.fds) {
  1162. pr_warning("Can't set pre-processor after loading\n");
  1163. return -EINVAL;
  1164. }
  1165. instances_fds = malloc(sizeof(int) * nr_instances);
  1166. if (!instances_fds) {
  1167. pr_warning("alloc memory failed for fds\n");
  1168. return -ENOMEM;
  1169. }
  1170. /* fill all fd with -1 */
  1171. memset(instances_fds, -1, sizeof(int) * nr_instances);
  1172. prog->instances.nr = nr_instances;
  1173. prog->instances.fds = instances_fds;
  1174. prog->preprocessor = prep;
  1175. return 0;
  1176. }
  1177. int bpf_program__nth_fd(struct bpf_program *prog, int n)
  1178. {
  1179. int fd;
  1180. if (n >= prog->instances.nr || n < 0) {
  1181. pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
  1182. n, prog->section_name, prog->instances.nr);
  1183. return -EINVAL;
  1184. }
  1185. fd = prog->instances.fds[n];
  1186. if (fd < 0) {
  1187. pr_warning("%dth instance of program '%s' is invalid\n",
  1188. n, prog->section_name);
  1189. return -ENOENT;
  1190. }
  1191. return fd;
  1192. }
  1193. static void bpf_program__set_type(struct bpf_program *prog,
  1194. enum bpf_prog_type type)
  1195. {
  1196. prog->type = type;
  1197. }
  1198. int bpf_program__set_tracepoint(struct bpf_program *prog)
  1199. {
  1200. if (!prog)
  1201. return -EINVAL;
  1202. bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
  1203. return 0;
  1204. }
  1205. int bpf_program__set_kprobe(struct bpf_program *prog)
  1206. {
  1207. if (!prog)
  1208. return -EINVAL;
  1209. bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
  1210. return 0;
  1211. }
  1212. static bool bpf_program__is_type(struct bpf_program *prog,
  1213. enum bpf_prog_type type)
  1214. {
  1215. return prog ? (prog->type == type) : false;
  1216. }
  1217. bool bpf_program__is_tracepoint(struct bpf_program *prog)
  1218. {
  1219. return bpf_program__is_type(prog, BPF_PROG_TYPE_TRACEPOINT);
  1220. }
  1221. bool bpf_program__is_kprobe(struct bpf_program *prog)
  1222. {
  1223. return bpf_program__is_type(prog, BPF_PROG_TYPE_KPROBE);
  1224. }
  1225. int bpf_map__fd(struct bpf_map *map)
  1226. {
  1227. return map ? map->fd : -EINVAL;
  1228. }
  1229. const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
  1230. {
  1231. return map ? &map->def : ERR_PTR(-EINVAL);
  1232. }
  1233. const char *bpf_map__name(struct bpf_map *map)
  1234. {
  1235. return map ? map->name : NULL;
  1236. }
  1237. int bpf_map__set_priv(struct bpf_map *map, void *priv,
  1238. bpf_map_clear_priv_t clear_priv)
  1239. {
  1240. if (!map)
  1241. return -EINVAL;
  1242. if (map->priv) {
  1243. if (map->clear_priv)
  1244. map->clear_priv(map, map->priv);
  1245. }
  1246. map->priv = priv;
  1247. map->clear_priv = clear_priv;
  1248. return 0;
  1249. }
  1250. void *bpf_map__priv(struct bpf_map *map)
  1251. {
  1252. return map ? map->priv : ERR_PTR(-EINVAL);
  1253. }
  1254. struct bpf_map *
  1255. bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
  1256. {
  1257. size_t idx;
  1258. struct bpf_map *s, *e;
  1259. if (!obj || !obj->maps)
  1260. return NULL;
  1261. s = obj->maps;
  1262. e = obj->maps + obj->nr_maps;
  1263. if (prev == NULL)
  1264. return s;
  1265. if ((prev < s) || (prev >= e)) {
  1266. pr_warning("error in %s: map handler doesn't belong to object\n",
  1267. __func__);
  1268. return NULL;
  1269. }
  1270. idx = (prev - obj->maps) + 1;
  1271. if (idx >= obj->nr_maps)
  1272. return NULL;
  1273. return &obj->maps[idx];
  1274. }
  1275. struct bpf_map *
  1276. bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
  1277. {
  1278. struct bpf_map *pos;
  1279. bpf_map__for_each(pos, obj) {
  1280. if (pos->name && !strcmp(pos->name, name))
  1281. return pos;
  1282. }
  1283. return NULL;
  1284. }
  1285. struct bpf_map *
  1286. bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
  1287. {
  1288. int i;
  1289. for (i = 0; i < obj->nr_maps; i++) {
  1290. if (obj->maps[i].offset == offset)
  1291. return &obj->maps[i];
  1292. }
  1293. return ERR_PTR(-ENOENT);
  1294. }