libbpf.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391
  1. /*
  2. * Common eBPF ELF object loading operations.
  3. *
  4. * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  5. * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  6. * Copyright (C) 2015 Huawei Inc.
  7. */
  8. #include <stdlib.h>
  9. #include <stdio.h>
  10. #include <stdarg.h>
  11. #include <inttypes.h>
  12. #include <string.h>
  13. #include <unistd.h>
  14. #include <fcntl.h>
  15. #include <errno.h>
  16. #include <asm/unistd.h>
  17. #include <linux/kernel.h>
  18. #include <linux/bpf.h>
  19. #include <linux/list.h>
  20. #include <libelf.h>
  21. #include <gelf.h>
  22. #include "libbpf.h"
  23. #include "bpf.h"
  24. #define __printf(a, b) __attribute__((format(printf, a, b)))
  25. __printf(1, 2)
  26. static int __base_pr(const char *format, ...)
  27. {
  28. va_list args;
  29. int err;
  30. va_start(args, format);
  31. err = vfprintf(stderr, format, args);
  32. va_end(args);
  33. return err;
  34. }
/*
 * Active print callbacks.  Warnings and info default to __base_pr()
 * (stderr); debug output is off (NULL) until enabled through
 * libbpf_set_print().
 */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Invoke @func only if set; every message gets a "libbpf: " prefix. */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
  46. void libbpf_set_print(libbpf_print_fn_t warn,
  47. libbpf_print_fn_t info,
  48. libbpf_print_fn_t debug)
  49. {
  50. __pr_warning = warn;
  51. __pr_info = info;
  52. __pr_debug = debug;
  53. }
  54. #define STRERR_BUFSIZE 128
  55. #define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
  56. #define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
  57. #define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
  58. static const char *libbpf_strerror_table[NR_ERRNO] = {
  59. [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
  60. [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
  61. [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
  62. [ERRCODE_OFFSET(ENDIAN)] = "Endian missmatch",
  63. [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
  64. [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
  65. [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
  66. [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
  67. [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
  68. };
  69. int libbpf_strerror(int err, char *buf, size_t size)
  70. {
  71. if (!buf || !size)
  72. return -1;
  73. err = err > 0 ? err : -err;
  74. if (err < __LIBBPF_ERRNO__START) {
  75. int ret;
  76. ret = strerror_r(err, buf, size);
  77. buf[size - 1] = '\0';
  78. return ret;
  79. }
  80. if (err < __LIBBPF_ERRNO__END) {
  81. const char *msg;
  82. msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
  83. snprintf(buf, size, "%s", msg);
  84. buf[size - 1] = '\0';
  85. return 0;
  86. }
  87. snprintf(buf, size, "Unknown libbpf error %d", err);
  88. buf[size - 1] = '\0';
  89. return -1;
  90. }
/* Evaluate @action; stash a non-zero result in @err and jump to @out. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;	\
	if (err)	\
		goto out;	\
} while(0)

/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* free(*ptr) and NULL the pointer, defending against double-free. */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/* close(fd) only if it is valid (>= 0), then reset it to -1;
 * the macro evaluates to close()'s return value (0 if skipped). */
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-backed ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *section_name;	/* owned copy (strdup) of the section name */
	struct bpf_insn *insns;	/* owned instruction buffer */
	size_t insns_cnt;
	struct {		/* one entry per relocation record */
		int insn_idx;	/* instruction to patch */
		int map_idx;	/* map whose fd gets patched in */
	} *reloc_desc;
	int nr_reloc;
	struct {
		int nr;		/* -1 until the program is loaded */
		int *fds;	/* one program fd per instance */
	} instances;
	bpf_program_prep_t preprocessor;
	struct bpf_object *obj;	/* back-pointer to the owning object */
	void *priv;		/* user-attached private data */
	bpf_program_clear_priv_t clear_priv;	/* destructor for priv */
};
struct bpf_map {
	int fd;			/* -1 until the map is created in the kernel */
	char *name;		/* owned copy of the map symbol name */
	struct bpf_map_def def;	/* definition parsed from the "maps" section */
	void *priv;		/* user-attached private data */
	bpf_map_clear_priv_t clear_priv;	/* destructor for priv */
};
/* Global list of every opened bpf_object (linked via bpf_object::list). */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];	/* NUL-terminated license string */
	u32 kern_version;	/* from the "version" section */
	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	bool loaded;		/* guards against double bpf_object__load() */
	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-owned in-memory ELF image */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* SHT_SYMTAB section data */
		size_t strtabidx;	/* string table index for symbols */
		struct {		/* raw REL sections, processed later */
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;
	char path[];		/* flexible array member: object file path */
};

/* True while the ELF phase is active, i.e. efile.elf was not released. */
#define obj_elf_valid(o)	((o)->efile.elf)
  180. static void bpf_program__unload(struct bpf_program *prog)
  181. {
  182. int i;
  183. if (!prog)
  184. return;
  185. /*
  186. * If the object is opened but the program was never loaded,
  187. * it is possible that prog->instances.nr == -1.
  188. */
  189. if (prog->instances.nr > 0) {
  190. for (i = 0; i < prog->instances.nr; i++)
  191. zclose(prog->instances.fds[i]);
  192. } else if (prog->instances.nr != -1) {
  193. pr_warning("Internal error: instances.nr is %d\n",
  194. prog->instances.nr);
  195. }
  196. prog->instances.nr = -1;
  197. zfree(&prog->instances.fds);
  198. }
  199. static void bpf_program__exit(struct bpf_program *prog)
  200. {
  201. if (!prog)
  202. return;
  203. if (prog->clear_priv)
  204. prog->clear_priv(prog, prog->priv);
  205. prog->priv = NULL;
  206. prog->clear_priv = NULL;
  207. bpf_program__unload(prog);
  208. zfree(&prog->section_name);
  209. zfree(&prog->insns);
  210. zfree(&prog->reloc_desc);
  211. prog->nr_reloc = 0;
  212. prog->insns_cnt = 0;
  213. prog->idx = -1;
  214. }
  215. static int
  216. bpf_program__init(void *data, size_t size, char *name, int idx,
  217. struct bpf_program *prog)
  218. {
  219. if (size < sizeof(struct bpf_insn)) {
  220. pr_warning("corrupted section '%s'\n", name);
  221. return -EINVAL;
  222. }
  223. bzero(prog, sizeof(*prog));
  224. prog->section_name = strdup(name);
  225. if (!prog->section_name) {
  226. pr_warning("failed to alloc name for prog %s\n",
  227. name);
  228. goto errout;
  229. }
  230. prog->insns = malloc(size);
  231. if (!prog->insns) {
  232. pr_warning("failed to alloc insns for %s\n", name);
  233. goto errout;
  234. }
  235. prog->insns_cnt = size / sizeof(struct bpf_insn);
  236. memcpy(prog->insns, data,
  237. prog->insns_cnt * sizeof(struct bpf_insn));
  238. prog->idx = idx;
  239. prog->instances.fds = NULL;
  240. prog->instances.nr = -1;
  241. return 0;
  242. errout:
  243. bpf_program__exit(prog);
  244. return -ENOMEM;
  245. }
  246. static int
  247. bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
  248. char *name, int idx)
  249. {
  250. struct bpf_program prog, *progs;
  251. int nr_progs, err;
  252. err = bpf_program__init(data, size, name, idx, &prog);
  253. if (err)
  254. return err;
  255. progs = obj->programs;
  256. nr_progs = obj->nr_programs;
  257. progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
  258. if (!progs) {
  259. /*
  260. * In this case the original obj->programs
  261. * is still valid, so don't need special treat for
  262. * bpf_close_object().
  263. */
  264. pr_warning("failed to alloc a new program '%s'\n",
  265. name);
  266. bpf_program__exit(&prog);
  267. return -ENOMEM;
  268. }
  269. pr_debug("found program %s\n", prog.section_name);
  270. obj->programs = progs;
  271. obj->nr_programs = nr_progs + 1;
  272. prog.obj = obj;
  273. progs[nr_progs] = prog;
  274. return 0;
  275. }
  276. static struct bpf_object *bpf_object__new(const char *path,
  277. void *obj_buf,
  278. size_t obj_buf_sz)
  279. {
  280. struct bpf_object *obj;
  281. obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
  282. if (!obj) {
  283. pr_warning("alloc memory failed for %s\n", path);
  284. return ERR_PTR(-ENOMEM);
  285. }
  286. strcpy(obj->path, path);
  287. obj->efile.fd = -1;
  288. /*
  289. * Caller of this function should also calls
  290. * bpf_object__elf_finish() after data collection to return
  291. * obj_buf to user. If not, we should duplicate the buffer to
  292. * avoid user freeing them before elf finish.
  293. */
  294. obj->efile.obj_buf = obj_buf;
  295. obj->efile.obj_buf_sz = obj_buf_sz;
  296. obj->loaded = false;
  297. INIT_LIST_HEAD(&obj->list);
  298. list_add(&obj->list, &bpf_objects_list);
  299. return obj;
  300. }
  301. static void bpf_object__elf_finish(struct bpf_object *obj)
  302. {
  303. if (!obj_elf_valid(obj))
  304. return;
  305. if (obj->efile.elf) {
  306. elf_end(obj->efile.elf);
  307. obj->efile.elf = NULL;
  308. }
  309. obj->efile.symbols = NULL;
  310. zfree(&obj->efile.reloc);
  311. obj->efile.nr_reloc = 0;
  312. zclose(obj->efile.fd);
  313. obj->efile.obj_buf = NULL;
  314. obj->efile.obj_buf_sz = 0;
  315. }
  316. static int bpf_object__elf_init(struct bpf_object *obj)
  317. {
  318. int err = 0;
  319. GElf_Ehdr *ep;
  320. if (obj_elf_valid(obj)) {
  321. pr_warning("elf init: internal error\n");
  322. return -LIBBPF_ERRNO__LIBELF;
  323. }
  324. if (obj->efile.obj_buf_sz > 0) {
  325. /*
  326. * obj_buf should have been validated by
  327. * bpf_object__open_buffer().
  328. */
  329. obj->efile.elf = elf_memory(obj->efile.obj_buf,
  330. obj->efile.obj_buf_sz);
  331. } else {
  332. obj->efile.fd = open(obj->path, O_RDONLY);
  333. if (obj->efile.fd < 0) {
  334. pr_warning("failed to open %s: %s\n", obj->path,
  335. strerror(errno));
  336. return -errno;
  337. }
  338. obj->efile.elf = elf_begin(obj->efile.fd,
  339. LIBBPF_ELF_C_READ_MMAP,
  340. NULL);
  341. }
  342. if (!obj->efile.elf) {
  343. pr_warning("failed to open %s as ELF file\n",
  344. obj->path);
  345. err = -LIBBPF_ERRNO__LIBELF;
  346. goto errout;
  347. }
  348. if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
  349. pr_warning("failed to get EHDR from %s\n",
  350. obj->path);
  351. err = -LIBBPF_ERRNO__FORMAT;
  352. goto errout;
  353. }
  354. ep = &obj->efile.ehdr;
  355. if ((ep->e_type != ET_REL) || (ep->e_machine != 0)) {
  356. pr_warning("%s is not an eBPF object file\n",
  357. obj->path);
  358. err = -LIBBPF_ERRNO__FORMAT;
  359. goto errout;
  360. }
  361. return 0;
  362. errout:
  363. bpf_object__elf_finish(obj);
  364. return err;
  365. }
  366. static int
  367. bpf_object__check_endianness(struct bpf_object *obj)
  368. {
  369. static unsigned int const endian = 1;
  370. switch (obj->efile.ehdr.e_ident[EI_DATA]) {
  371. case ELFDATA2LSB:
  372. /* We are big endian, BPF obj is little endian. */
  373. if (*(unsigned char const *)&endian != 1)
  374. goto mismatch;
  375. break;
  376. case ELFDATA2MSB:
  377. /* We are little endian, BPF obj is big endian. */
  378. if (*(unsigned char const *)&endian != 0)
  379. goto mismatch;
  380. break;
  381. default:
  382. return -LIBBPF_ERRNO__ENDIAN;
  383. }
  384. return 0;
  385. mismatch:
  386. pr_warning("Error: endianness mismatch.\n");
  387. return -LIBBPF_ERRNO__ENDIAN;
  388. }
  389. static int
  390. bpf_object__init_license(struct bpf_object *obj,
  391. void *data, size_t size)
  392. {
  393. memcpy(obj->license, data,
  394. min(size, sizeof(obj->license) - 1));
  395. pr_debug("license of %s is %s\n", obj->path, obj->license);
  396. return 0;
  397. }
  398. static int
  399. bpf_object__init_kversion(struct bpf_object *obj,
  400. void *data, size_t size)
  401. {
  402. u32 kver;
  403. if (size != sizeof(kver)) {
  404. pr_warning("invalid kver section in %s\n", obj->path);
  405. return -LIBBPF_ERRNO__FORMAT;
  406. }
  407. memcpy(&kver, data, sizeof(kver));
  408. obj->kern_version = kver;
  409. pr_debug("kernel version of %s is %x\n", obj->path,
  410. obj->kern_version);
  411. return 0;
  412. }
  413. static int
  414. bpf_object__init_maps(struct bpf_object *obj, void *data,
  415. size_t size)
  416. {
  417. size_t nr_maps;
  418. int i;
  419. nr_maps = size / sizeof(struct bpf_map_def);
  420. if (!data || !nr_maps) {
  421. pr_debug("%s doesn't need map definition\n",
  422. obj->path);
  423. return 0;
  424. }
  425. pr_debug("maps in %s: %zd bytes\n", obj->path, size);
  426. obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
  427. if (!obj->maps) {
  428. pr_warning("alloc maps for object failed\n");
  429. return -ENOMEM;
  430. }
  431. obj->nr_maps = nr_maps;
  432. for (i = 0; i < nr_maps; i++) {
  433. struct bpf_map_def *def = &obj->maps[i].def;
  434. /*
  435. * fill all fd with -1 so won't close incorrect
  436. * fd (fd=0 is stdin) when failure (zclose won't close
  437. * negative fd)).
  438. */
  439. obj->maps[i].fd = -1;
  440. /* Save map definition into obj->maps */
  441. *def = ((struct bpf_map_def *)data)[i];
  442. }
  443. return 0;
  444. }
  445. static int
  446. bpf_object__init_maps_name(struct bpf_object *obj, int maps_shndx)
  447. {
  448. int i;
  449. Elf_Data *symbols = obj->efile.symbols;
  450. if (!symbols || maps_shndx < 0)
  451. return -EINVAL;
  452. for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
  453. GElf_Sym sym;
  454. size_t map_idx;
  455. const char *map_name;
  456. if (!gelf_getsym(symbols, i, &sym))
  457. continue;
  458. if (sym.st_shndx != maps_shndx)
  459. continue;
  460. map_name = elf_strptr(obj->efile.elf,
  461. obj->efile.strtabidx,
  462. sym.st_name);
  463. map_idx = sym.st_value / sizeof(struct bpf_map_def);
  464. if (map_idx >= obj->nr_maps) {
  465. pr_warning("index of map \"%s\" is buggy: %zu > %zu\n",
  466. map_name, map_idx, obj->nr_maps);
  467. continue;
  468. }
  469. obj->maps[map_idx].name = strdup(map_name);
  470. if (!obj->maps[map_idx].name) {
  471. pr_warning("failed to alloc map name\n");
  472. return -ENOMEM;
  473. }
  474. pr_debug("map %zu is \"%s\"\n", map_idx,
  475. obj->maps[map_idx].name);
  476. }
  477. return 0;
  478. }
  479. static int bpf_object__elf_collect(struct bpf_object *obj)
  480. {
  481. Elf *elf = obj->efile.elf;
  482. GElf_Ehdr *ep = &obj->efile.ehdr;
  483. Elf_Scn *scn = NULL;
  484. int idx = 0, err = 0, maps_shndx = -1;
  485. /* Elf is corrupted/truncated, avoid calling elf_strptr. */
  486. if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
  487. pr_warning("failed to get e_shstrndx from %s\n",
  488. obj->path);
  489. return -LIBBPF_ERRNO__FORMAT;
  490. }
  491. while ((scn = elf_nextscn(elf, scn)) != NULL) {
  492. char *name;
  493. GElf_Shdr sh;
  494. Elf_Data *data;
  495. idx++;
  496. if (gelf_getshdr(scn, &sh) != &sh) {
  497. pr_warning("failed to get section header from %s\n",
  498. obj->path);
  499. err = -LIBBPF_ERRNO__FORMAT;
  500. goto out;
  501. }
  502. name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
  503. if (!name) {
  504. pr_warning("failed to get section name from %s\n",
  505. obj->path);
  506. err = -LIBBPF_ERRNO__FORMAT;
  507. goto out;
  508. }
  509. data = elf_getdata(scn, 0);
  510. if (!data) {
  511. pr_warning("failed to get section data from %s(%s)\n",
  512. name, obj->path);
  513. err = -LIBBPF_ERRNO__FORMAT;
  514. goto out;
  515. }
  516. pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
  517. name, (unsigned long)data->d_size,
  518. (int)sh.sh_link, (unsigned long)sh.sh_flags,
  519. (int)sh.sh_type);
  520. if (strcmp(name, "license") == 0)
  521. err = bpf_object__init_license(obj,
  522. data->d_buf,
  523. data->d_size);
  524. else if (strcmp(name, "version") == 0)
  525. err = bpf_object__init_kversion(obj,
  526. data->d_buf,
  527. data->d_size);
  528. else if (strcmp(name, "maps") == 0) {
  529. err = bpf_object__init_maps(obj, data->d_buf,
  530. data->d_size);
  531. maps_shndx = idx;
  532. } else if (sh.sh_type == SHT_SYMTAB) {
  533. if (obj->efile.symbols) {
  534. pr_warning("bpf: multiple SYMTAB in %s\n",
  535. obj->path);
  536. err = -LIBBPF_ERRNO__FORMAT;
  537. } else {
  538. obj->efile.symbols = data;
  539. obj->efile.strtabidx = sh.sh_link;
  540. }
  541. } else if ((sh.sh_type == SHT_PROGBITS) &&
  542. (sh.sh_flags & SHF_EXECINSTR) &&
  543. (data->d_size > 0)) {
  544. err = bpf_object__add_program(obj, data->d_buf,
  545. data->d_size, name, idx);
  546. if (err) {
  547. char errmsg[STRERR_BUFSIZE];
  548. strerror_r(-err, errmsg, sizeof(errmsg));
  549. pr_warning("failed to alloc program %s (%s): %s",
  550. name, obj->path, errmsg);
  551. }
  552. } else if (sh.sh_type == SHT_REL) {
  553. void *reloc = obj->efile.reloc;
  554. int nr_reloc = obj->efile.nr_reloc + 1;
  555. reloc = realloc(reloc,
  556. sizeof(*obj->efile.reloc) * nr_reloc);
  557. if (!reloc) {
  558. pr_warning("realloc failed\n");
  559. err = -ENOMEM;
  560. } else {
  561. int n = nr_reloc - 1;
  562. obj->efile.reloc = reloc;
  563. obj->efile.nr_reloc = nr_reloc;
  564. obj->efile.reloc[n].shdr = sh;
  565. obj->efile.reloc[n].data = data;
  566. }
  567. }
  568. if (err)
  569. goto out;
  570. }
  571. if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
  572. pr_warning("Corrupted ELF file: index of strtab invalid\n");
  573. return LIBBPF_ERRNO__FORMAT;
  574. }
  575. if (maps_shndx >= 0)
  576. err = bpf_object__init_maps_name(obj, maps_shndx);
  577. out:
  578. return err;
  579. }
  580. static struct bpf_program *
  581. bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
  582. {
  583. struct bpf_program *prog;
  584. size_t i;
  585. for (i = 0; i < obj->nr_programs; i++) {
  586. prog = &obj->programs[i];
  587. if (prog->idx == idx)
  588. return prog;
  589. }
  590. return NULL;
  591. }
  592. static int
  593. bpf_program__collect_reloc(struct bpf_program *prog,
  594. size_t nr_maps, GElf_Shdr *shdr,
  595. Elf_Data *data, Elf_Data *symbols)
  596. {
  597. int i, nrels;
  598. pr_debug("collecting relocating info for: '%s'\n",
  599. prog->section_name);
  600. nrels = shdr->sh_size / shdr->sh_entsize;
  601. prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
  602. if (!prog->reloc_desc) {
  603. pr_warning("failed to alloc memory in relocation\n");
  604. return -ENOMEM;
  605. }
  606. prog->nr_reloc = nrels;
  607. for (i = 0; i < nrels; i++) {
  608. GElf_Sym sym;
  609. GElf_Rel rel;
  610. unsigned int insn_idx;
  611. struct bpf_insn *insns = prog->insns;
  612. size_t map_idx;
  613. if (!gelf_getrel(data, i, &rel)) {
  614. pr_warning("relocation: failed to get %d reloc\n", i);
  615. return -LIBBPF_ERRNO__FORMAT;
  616. }
  617. insn_idx = rel.r_offset / sizeof(struct bpf_insn);
  618. pr_debug("relocation: insn_idx=%u\n", insn_idx);
  619. if (!gelf_getsym(symbols,
  620. GELF_R_SYM(rel.r_info),
  621. &sym)) {
  622. pr_warning("relocation: symbol %"PRIx64" not found\n",
  623. GELF_R_SYM(rel.r_info));
  624. return -LIBBPF_ERRNO__FORMAT;
  625. }
  626. if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
  627. pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
  628. insn_idx, insns[insn_idx].code);
  629. return -LIBBPF_ERRNO__RELOC;
  630. }
  631. map_idx = sym.st_value / sizeof(struct bpf_map_def);
  632. if (map_idx >= nr_maps) {
  633. pr_warning("bpf relocation: map_idx %d large than %d\n",
  634. (int)map_idx, (int)nr_maps - 1);
  635. return -LIBBPF_ERRNO__RELOC;
  636. }
  637. prog->reloc_desc[i].insn_idx = insn_idx;
  638. prog->reloc_desc[i].map_idx = map_idx;
  639. }
  640. return 0;
  641. }
  642. static int
  643. bpf_object__create_maps(struct bpf_object *obj)
  644. {
  645. unsigned int i;
  646. for (i = 0; i < obj->nr_maps; i++) {
  647. struct bpf_map_def *def = &obj->maps[i].def;
  648. int *pfd = &obj->maps[i].fd;
  649. *pfd = bpf_create_map(def->type,
  650. def->key_size,
  651. def->value_size,
  652. def->max_entries);
  653. if (*pfd < 0) {
  654. size_t j;
  655. int err = *pfd;
  656. pr_warning("failed to create map: %s\n",
  657. strerror(errno));
  658. for (j = 0; j < i; j++)
  659. zclose(obj->maps[j].fd);
  660. return err;
  661. }
  662. pr_debug("create map: fd=%d\n", *pfd);
  663. }
  664. return 0;
  665. }
  666. static int
  667. bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
  668. {
  669. int i;
  670. if (!prog || !prog->reloc_desc)
  671. return 0;
  672. for (i = 0; i < prog->nr_reloc; i++) {
  673. int insn_idx, map_idx;
  674. struct bpf_insn *insns = prog->insns;
  675. insn_idx = prog->reloc_desc[i].insn_idx;
  676. map_idx = prog->reloc_desc[i].map_idx;
  677. if (insn_idx >= (int)prog->insns_cnt) {
  678. pr_warning("relocation out of range: '%s'\n",
  679. prog->section_name);
  680. return -LIBBPF_ERRNO__RELOC;
  681. }
  682. insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
  683. insns[insn_idx].imm = obj->maps[map_idx].fd;
  684. }
  685. zfree(&prog->reloc_desc);
  686. prog->nr_reloc = 0;
  687. return 0;
  688. }
  689. static int
  690. bpf_object__relocate(struct bpf_object *obj)
  691. {
  692. struct bpf_program *prog;
  693. size_t i;
  694. int err;
  695. for (i = 0; i < obj->nr_programs; i++) {
  696. prog = &obj->programs[i];
  697. err = bpf_program__relocate(prog, obj);
  698. if (err) {
  699. pr_warning("failed to relocate '%s'\n",
  700. prog->section_name);
  701. return err;
  702. }
  703. }
  704. return 0;
  705. }
  706. static int bpf_object__collect_reloc(struct bpf_object *obj)
  707. {
  708. int i, err;
  709. if (!obj_elf_valid(obj)) {
  710. pr_warning("Internal error: elf object is closed\n");
  711. return -LIBBPF_ERRNO__INTERNAL;
  712. }
  713. for (i = 0; i < obj->efile.nr_reloc; i++) {
  714. GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
  715. Elf_Data *data = obj->efile.reloc[i].data;
  716. int idx = shdr->sh_info;
  717. struct bpf_program *prog;
  718. size_t nr_maps = obj->nr_maps;
  719. if (shdr->sh_type != SHT_REL) {
  720. pr_warning("internal error at %d\n", __LINE__);
  721. return -LIBBPF_ERRNO__INTERNAL;
  722. }
  723. prog = bpf_object__find_prog_by_idx(obj, idx);
  724. if (!prog) {
  725. pr_warning("relocation failed: no %d section\n",
  726. idx);
  727. return -LIBBPF_ERRNO__RELOC;
  728. }
  729. err = bpf_program__collect_reloc(prog, nr_maps,
  730. shdr, data,
  731. obj->efile.symbols);
  732. if (err)
  733. return err;
  734. }
  735. return 0;
  736. }
/*
 * Load one instruction buffer into the kernel as a kprobe program.
 * On success *pfd receives the program fd and 0 is returned; on
 * failure a -LIBBPF_ERRNO__* code is chosen by inspecting the
 * verifier log.
 */
static int
load_program(struct bpf_insn *insns, int insns_cnt,
	     char *license, u32 kern_version, int *pfd)
{
	int ret;
	char *log_buf;

	if (!insns || !insns_cnt)
		return -EINVAL;

	/* Best effort: loading still proceeds without a log buffer. */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
			       insns_cnt, license, kern_version,
			       log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	/* Generic failure unless refined by the checks below. */
	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		/* Non-empty log: the verifier rejected the program. */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else {
		if (insns_cnt >= BPF_MAXINSNS) {
			pr_warning("Program too large (%d insns), at most %d insns\n",
				   insns_cnt, BPF_MAXINSNS);
			ret = -LIBBPF_ERRNO__PROG2BIG;
		} else if (log_buf) {
			/*
			 * Buffer present but empty log: classified as a
			 * kernel-version problem (LIBBPF_ERRNO__KVER).
			 */
			pr_warning("log buffer is empty\n");
			ret = -LIBBPF_ERRNO__KVER;
		}
	}

out:
	free(log_buf);
	return ret;
}
/*
 * Load all instances of @prog.  Without a preprocessor there is exactly
 * one instance; with one, the preprocessor rewrites the instructions
 * per instance and may skip instances.  prog->insns is freed on exit
 * in either case — the program can only be loaded once.
 */
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	/* First load without a prior bpf_program__set_prep(): set up a
	 * single-slot fd array. */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			/* A preprocessor requires pre-sized instances. */
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}
		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	/* Simple path: no preprocessor, load the instructions as-is. */
	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	/* Preprocessed path: one load per instance. */
	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* Empty result means: skip this instance. */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* Instructions are not kept after load; only the fds remain. */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
  846. static int
  847. bpf_object__load_progs(struct bpf_object *obj)
  848. {
  849. size_t i;
  850. int err;
  851. for (i = 0; i < obj->nr_programs; i++) {
  852. err = bpf_program__load(&obj->programs[i],
  853. obj->license,
  854. obj->kern_version);
  855. if (err)
  856. return err;
  857. }
  858. return 0;
  859. }
  860. static int bpf_object__validate(struct bpf_object *obj)
  861. {
  862. if (obj->kern_version == 0) {
  863. pr_warning("%s doesn't provide kernel version\n",
  864. obj->path);
  865. return -LIBBPF_ERRNO__KVERSION;
  866. }
  867. return 0;
  868. }
/*
 * Shared open path for file- and buffer-backed objects: parse the ELF,
 * collect programs/maps/relocations, validate, then release all ELF
 * state.  Returns an ERR_PTR()-encoded error on failure.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* CHECK_ERR jumps to 'out' on the first failing step. */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj), err, out);

	/* ELF data is no longer needed once collection succeeded. */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
  892. struct bpf_object *bpf_object__open(const char *path)
  893. {
  894. /* param validation */
  895. if (!path)
  896. return NULL;
  897. pr_debug("loading %s\n", path);
  898. return __bpf_object__open(path, NULL, 0);
  899. }
  900. struct bpf_object *bpf_object__open_buffer(void *obj_buf,
  901. size_t obj_buf_sz,
  902. const char *name)
  903. {
  904. char tmp_name[64];
  905. /* param validation */
  906. if (!obj_buf || obj_buf_sz <= 0)
  907. return NULL;
  908. if (!name) {
  909. snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
  910. (unsigned long)obj_buf,
  911. (unsigned long)obj_buf_sz);
  912. tmp_name[sizeof(tmp_name) - 1] = '\0';
  913. name = tmp_name;
  914. }
  915. pr_debug("loading object '%s' from buffer\n",
  916. name);
  917. return __bpf_object__open(name, obj_buf, obj_buf_sz);
  918. }
  919. int bpf_object__unload(struct bpf_object *obj)
  920. {
  921. size_t i;
  922. if (!obj)
  923. return -EINVAL;
  924. for (i = 0; i < obj->nr_maps; i++)
  925. zclose(obj->maps[i].fd);
  926. for (i = 0; i < obj->nr_programs; i++)
  927. bpf_program__unload(&obj->programs[i]);
  928. return 0;
  929. }
/*
 * Load all maps and programs of @obj into the kernel.
 *
 * May only be called once per object; a second call fails with -EINVAL.
 * On failure everything loaded so far is unloaded again before the
 * error is returned.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	/* Set the flag before attempting anything, so even a failed
	 * attempt counts as "loaded" and cannot be retried. */
	obj->loaded = true;

	/* CHECK_ERR() stores the result in 'err' and jumps to 'out' on failure. */
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
  949. void bpf_object__close(struct bpf_object *obj)
  950. {
  951. size_t i;
  952. if (!obj)
  953. return;
  954. bpf_object__elf_finish(obj);
  955. bpf_object__unload(obj);
  956. for (i = 0; i < obj->nr_maps; i++) {
  957. zfree(&obj->maps[i].name);
  958. if (obj->maps[i].clear_priv)
  959. obj->maps[i].clear_priv(&obj->maps[i],
  960. obj->maps[i].priv);
  961. obj->maps[i].priv = NULL;
  962. obj->maps[i].clear_priv = NULL;
  963. }
  964. zfree(&obj->maps);
  965. obj->nr_maps = 0;
  966. if (obj->programs && obj->nr_programs) {
  967. for (i = 0; i < obj->nr_programs; i++)
  968. bpf_program__exit(&obj->programs[i]);
  969. }
  970. zfree(&obj->programs);
  971. list_del(&obj->list);
  972. free(obj);
  973. }
/*
 * Iterate over all open bpf_objects: pass NULL to get the first object
 * on the global bpf_objects_list, or a previous return value to get its
 * successor.  Returns NULL once the list is exhausted.
 */
struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}
  989. const char *
  990. bpf_object__get_name(struct bpf_object *obj)
  991. {
  992. if (!obj)
  993. return ERR_PTR(-EINVAL);
  994. return obj->path;
  995. }
  996. unsigned int
  997. bpf_object__get_kversion(struct bpf_object *obj)
  998. {
  999. if (!obj)
  1000. return 0;
  1001. return obj->kern_version;
  1002. }
  1003. struct bpf_program *
  1004. bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
  1005. {
  1006. size_t idx;
  1007. if (!obj->programs)
  1008. return NULL;
  1009. /* First handler */
  1010. if (prev == NULL)
  1011. return &obj->programs[0];
  1012. if (prev->obj != obj) {
  1013. pr_warning("error: program handler doesn't match object\n");
  1014. return NULL;
  1015. }
  1016. idx = (prev - obj->programs) + 1;
  1017. if (idx >= obj->nr_programs)
  1018. return NULL;
  1019. return &obj->programs[idx];
  1020. }
  1021. int bpf_program__set_private(struct bpf_program *prog,
  1022. void *priv,
  1023. bpf_program_clear_priv_t clear_priv)
  1024. {
  1025. if (prog->priv && prog->clear_priv)
  1026. prog->clear_priv(prog, prog->priv);
  1027. prog->priv = priv;
  1028. prog->clear_priv = clear_priv;
  1029. return 0;
  1030. }
  1031. int bpf_program__get_private(struct bpf_program *prog, void **ppriv)
  1032. {
  1033. *ppriv = prog->priv;
  1034. return 0;
  1035. }
  1036. const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
  1037. {
  1038. const char *title;
  1039. title = prog->section_name;
  1040. if (needs_copy) {
  1041. title = strdup(title);
  1042. if (!title) {
  1043. pr_warning("failed to strdup program title\n");
  1044. return ERR_PTR(-ENOMEM);
  1045. }
  1046. }
  1047. return title;
  1048. }
/* Return the fd of the default (0th) instance of @prog. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
  1053. int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
  1054. bpf_program_prep_t prep)
  1055. {
  1056. int *instances_fds;
  1057. if (nr_instances <= 0 || !prep)
  1058. return -EINVAL;
  1059. if (prog->instances.nr > 0 || prog->instances.fds) {
  1060. pr_warning("Can't set pre-processor after loading\n");
  1061. return -EINVAL;
  1062. }
  1063. instances_fds = malloc(sizeof(int) * nr_instances);
  1064. if (!instances_fds) {
  1065. pr_warning("alloc memory failed for fds\n");
  1066. return -ENOMEM;
  1067. }
  1068. /* fill all fd with -1 */
  1069. memset(instances_fds, -1, sizeof(int) * nr_instances);
  1070. prog->instances.nr = nr_instances;
  1071. prog->instances.fds = instances_fds;
  1072. prog->preprocessor = prep;
  1073. return 0;
  1074. }
  1075. int bpf_program__nth_fd(struct bpf_program *prog, int n)
  1076. {
  1077. int fd;
  1078. if (n >= prog->instances.nr || n < 0) {
  1079. pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
  1080. n, prog->section_name, prog->instances.nr);
  1081. return -EINVAL;
  1082. }
  1083. fd = prog->instances.fds[n];
  1084. if (fd < 0) {
  1085. pr_warning("%dth instance of program '%s' is invalid\n",
  1086. n, prog->section_name);
  1087. return -ENOENT;
  1088. }
  1089. return fd;
  1090. }
  1091. int bpf_map__get_fd(struct bpf_map *map)
  1092. {
  1093. if (!map)
  1094. return -EINVAL;
  1095. return map->fd;
  1096. }
  1097. int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef)
  1098. {
  1099. if (!map || !pdef)
  1100. return -EINVAL;
  1101. *pdef = map->def;
  1102. return 0;
  1103. }
  1104. const char *bpf_map__get_name(struct bpf_map *map)
  1105. {
  1106. if (!map)
  1107. return NULL;
  1108. return map->name;
  1109. }
  1110. int bpf_map__set_private(struct bpf_map *map, void *priv,
  1111. bpf_map_clear_priv_t clear_priv)
  1112. {
  1113. if (!map)
  1114. return -EINVAL;
  1115. if (map->priv) {
  1116. if (map->clear_priv)
  1117. map->clear_priv(map, map->priv);
  1118. }
  1119. map->priv = priv;
  1120. map->clear_priv = clear_priv;
  1121. return 0;
  1122. }
  1123. int bpf_map__get_private(struct bpf_map *map, void **ppriv)
  1124. {
  1125. if (!map)
  1126. return -EINVAL;
  1127. if (ppriv)
  1128. *ppriv = map->priv;
  1129. return 0;
  1130. }
  1131. struct bpf_map *
  1132. bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
  1133. {
  1134. size_t idx;
  1135. struct bpf_map *s, *e;
  1136. if (!obj || !obj->maps)
  1137. return NULL;
  1138. s = obj->maps;
  1139. e = obj->maps + obj->nr_maps;
  1140. if (prev == NULL)
  1141. return s;
  1142. if ((prev < s) || (prev >= e)) {
  1143. pr_warning("error in %s: map handler doesn't belong to object\n",
  1144. __func__);
  1145. return NULL;
  1146. }
  1147. idx = (prev - obj->maps) + 1;
  1148. if (idx >= obj->nr_maps)
  1149. return NULL;
  1150. return &obj->maps[idx];
  1151. }
  1152. struct bpf_map *
  1153. bpf_object__get_map_by_name(struct bpf_object *obj, const char *name)
  1154. {
  1155. struct bpf_map *pos;
  1156. bpf_map__for_each(pos, obj) {
  1157. if (pos->name && !strcmp(pos->name, name))
  1158. return pos;
  1159. }
  1160. return NULL;
  1161. }