libbpf.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379
  1. /*
  2. * Common eBPF ELF object loading operations.
  3. *
  4. * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  5. * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  6. * Copyright (C) 2015 Huawei Inc.
  7. */
  8. #include <stdlib.h>
  9. #include <stdio.h>
  10. #include <stdarg.h>
  11. #include <inttypes.h>
  12. #include <string.h>
  13. #include <unistd.h>
  14. #include <fcntl.h>
  15. #include <errno.h>
  16. #include <asm/unistd.h>
  17. #include <linux/kernel.h>
  18. #include <linux/bpf.h>
  19. #include <linux/list.h>
  20. #include <libelf.h>
  21. #include <gelf.h>
  22. #include "libbpf.h"
  23. #include "bpf.h"
  24. #define __printf(a, b) __attribute__((format(printf, a, b)))
  25. __printf(1, 2)
  26. static int __base_pr(const char *format, ...)
  27. {
  28. va_list args;
  29. int err;
  30. va_start(args, format);
  31. err = vfprintf(stderr, format, args);
  32. va_end(args);
  33. return err;
  34. }
/*
 * Per-level print handlers.  Warning and info default to __base_pr
 * (stderr); debug defaults to NULL, i.e. silent, until the caller
 * installs a handler through libbpf_set_print().
 */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Invoke 'func' only when a handler is installed; prefix every message. */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
  46. void libbpf_set_print(libbpf_print_fn_t warn,
  47. libbpf_print_fn_t info,
  48. libbpf_print_fn_t debug)
  49. {
  50. __pr_warning = warn;
  51. __pr_info = info;
  52. __pr_debug = debug;
  53. }
  54. #define STRERR_BUFSIZE 128
  55. #define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
  56. #define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
  57. #define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
  58. static const char *libbpf_strerror_table[NR_ERRNO] = {
  59. [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
  60. [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
  61. [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
  62. [ERRCODE_OFFSET(ENDIAN)] = "Endian missmatch",
  63. [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
  64. [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
  65. [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
  66. [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
  67. [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
  68. };
/*
 * Fill 'buf' with a message describing 'err', which may be a plain
 * errno value (either sign) or one of libbpf's private error codes.
 * Returns 0 on success, -1 for invalid arguments or an unknown code.
 * 'buf' is always NUL-terminated on return.
 */
int libbpf_strerror(int err, char *buf, size_t size)
{
	if (!buf || !size)
		return -1;

	/* Normalize: callers pass negative error codes. */
	err = err > 0 ? err : -err;

	if (err < __LIBBPF_ERRNO__START) {
		int ret;

		/* Ordinary errno: defer to the C library. */
		ret = strerror_r(err, buf, size);
		buf[size - 1] = '\0';
		return ret;
	}

	if (err < __LIBBPF_ERRNO__END) {
		const char *msg;

		/* libbpf private code: look up the static table. */
		msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	snprintf(buf, size, "Unknown libbpf error %d", err);
	buf[size - 1] = '\0';
	return -1;
}
  91. #define CHECK_ERR(action, err, out) do { \
  92. err = action; \
  93. if (err) \
  94. goto out; \
  95. } while(0)
  96. /* Copied from tools/perf/util/util.h */
  97. #ifndef zfree
  98. # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
  99. #endif
  100. #ifndef zclose
  101. # define zclose(fd) ({ \
  102. int ___err = 0; \
  103. if ((fd) >= 0) \
  104. ___err = close((fd)); \
  105. fd = -1; \
  106. ___err; })
  107. #endif
  108. #ifdef HAVE_LIBELF_MMAP_SUPPORT
  109. # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
  110. #else
  111. # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
  112. #endif
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	/* Name of the ELF section the program came from (strdup'd). */
	char *section_name;
	/* Private copy of the section's instructions. */
	struct bpf_insn *insns;
	size_t insns_cnt;

	/* Relocations collected from the matching SHT_REL section. */
	struct {
		int insn_idx;	/* instruction to patch */
		int map_idx;	/* map whose fd gets patched in */
	} *reloc_desc;
	int nr_reloc;

	/*
	 * Loaded instances.  nr == -1 means "never loaded"; more than
	 * one instance exists only when a preprocessor is installed.
	 */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	/* Back pointer to the owning object. */
	struct bpf_object *obj;
	/* Caller-attached private data and its destructor. */
	void *priv;
	bpf_program_clear_priv_t clear_priv;
};

/* One BPF map: kernel fd, ELF symbol name and on-disk definition. */
struct bpf_map {
	int fd;
	char *name;
	struct bpf_map_def def;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};
/* Global list linking every live bpf_object (see 'list' member below). */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];		/* NUL-terminated license string */
	u32 kern_version;		/* from the "version" section */

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;			/* guards against double load */

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-owned in-memory image */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* SHT_SYMTAB data, borrowed from libelf */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;		/* remembered SHT_REL sections */
		int nr_reloc;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;
	char path[];			/* flexible array: the object's path */
};
/* True while the ELF handle is open (between elf_init and elf_finish). */
#define obj_elf_valid(o)	((o)->efile.elf)
  179. static void bpf_program__unload(struct bpf_program *prog)
  180. {
  181. int i;
  182. if (!prog)
  183. return;
  184. /*
  185. * If the object is opened but the program was never loaded,
  186. * it is possible that prog->instances.nr == -1.
  187. */
  188. if (prog->instances.nr > 0) {
  189. for (i = 0; i < prog->instances.nr; i++)
  190. zclose(prog->instances.fds[i]);
  191. } else if (prog->instances.nr != -1) {
  192. pr_warning("Internal error: instances.nr is %d\n",
  193. prog->instances.nr);
  194. }
  195. prog->instances.nr = -1;
  196. zfree(&prog->instances.fds);
  197. }
  198. static void bpf_program__exit(struct bpf_program *prog)
  199. {
  200. if (!prog)
  201. return;
  202. if (prog->clear_priv)
  203. prog->clear_priv(prog, prog->priv);
  204. prog->priv = NULL;
  205. prog->clear_priv = NULL;
  206. bpf_program__unload(prog);
  207. zfree(&prog->section_name);
  208. zfree(&prog->insns);
  209. zfree(&prog->reloc_desc);
  210. prog->nr_reloc = 0;
  211. prog->insns_cnt = 0;
  212. prog->idx = -1;
  213. }
  214. static int
  215. bpf_program__init(void *data, size_t size, char *name, int idx,
  216. struct bpf_program *prog)
  217. {
  218. if (size < sizeof(struct bpf_insn)) {
  219. pr_warning("corrupted section '%s'\n", name);
  220. return -EINVAL;
  221. }
  222. bzero(prog, sizeof(*prog));
  223. prog->section_name = strdup(name);
  224. if (!prog->section_name) {
  225. pr_warning("failed to alloc name for prog %s\n",
  226. name);
  227. goto errout;
  228. }
  229. prog->insns = malloc(size);
  230. if (!prog->insns) {
  231. pr_warning("failed to alloc insns for %s\n", name);
  232. goto errout;
  233. }
  234. prog->insns_cnt = size / sizeof(struct bpf_insn);
  235. memcpy(prog->insns, data,
  236. prog->insns_cnt * sizeof(struct bpf_insn));
  237. prog->idx = idx;
  238. prog->instances.fds = NULL;
  239. prog->instances.nr = -1;
  240. return 0;
  241. errout:
  242. bpf_program__exit(prog);
  243. return -ENOMEM;
  244. }
/*
 * Append one program parsed from section 'name' to obj->programs,
 * growing the array by one.  The new element is built on the stack
 * first so that a realloc failure leaves the object untouched.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, name, idx, &prog);
	if (err)
		return err;

	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program '%s'\n",
			   name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}
/*
 * Allocate a bpf_object, store 'path' in its flexible array member and
 * link it on the global object list.  Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	/* calloc also zeroes license[], fds handled below. */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
  300. static void bpf_object__elf_finish(struct bpf_object *obj)
  301. {
  302. if (!obj_elf_valid(obj))
  303. return;
  304. if (obj->efile.elf) {
  305. elf_end(obj->efile.elf);
  306. obj->efile.elf = NULL;
  307. }
  308. obj->efile.symbols = NULL;
  309. zfree(&obj->efile.reloc);
  310. obj->efile.nr_reloc = 0;
  311. zclose(obj->efile.fd);
  312. obj->efile.obj_buf = NULL;
  313. obj->efile.obj_buf_sz = 0;
  314. }
  315. static int bpf_object__elf_init(struct bpf_object *obj)
  316. {
  317. int err = 0;
  318. GElf_Ehdr *ep;
  319. if (obj_elf_valid(obj)) {
  320. pr_warning("elf init: internal error\n");
  321. return -LIBBPF_ERRNO__LIBELF;
  322. }
  323. if (obj->efile.obj_buf_sz > 0) {
  324. /*
  325. * obj_buf should have been validated by
  326. * bpf_object__open_buffer().
  327. */
  328. obj->efile.elf = elf_memory(obj->efile.obj_buf,
  329. obj->efile.obj_buf_sz);
  330. } else {
  331. obj->efile.fd = open(obj->path, O_RDONLY);
  332. if (obj->efile.fd < 0) {
  333. pr_warning("failed to open %s: %s\n", obj->path,
  334. strerror(errno));
  335. return -errno;
  336. }
  337. obj->efile.elf = elf_begin(obj->efile.fd,
  338. LIBBPF_ELF_C_READ_MMAP,
  339. NULL);
  340. }
  341. if (!obj->efile.elf) {
  342. pr_warning("failed to open %s as ELF file\n",
  343. obj->path);
  344. err = -LIBBPF_ERRNO__LIBELF;
  345. goto errout;
  346. }
  347. if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
  348. pr_warning("failed to get EHDR from %s\n",
  349. obj->path);
  350. err = -LIBBPF_ERRNO__FORMAT;
  351. goto errout;
  352. }
  353. ep = &obj->efile.ehdr;
  354. if ((ep->e_type != ET_REL) || (ep->e_machine != 0)) {
  355. pr_warning("%s is not an eBPF object file\n",
  356. obj->path);
  357. err = -LIBBPF_ERRNO__FORMAT;
  358. goto errout;
  359. }
  360. return 0;
  361. errout:
  362. bpf_object__elf_finish(obj);
  363. return err;
  364. }
  365. static int
  366. bpf_object__check_endianness(struct bpf_object *obj)
  367. {
  368. static unsigned int const endian = 1;
  369. switch (obj->efile.ehdr.e_ident[EI_DATA]) {
  370. case ELFDATA2LSB:
  371. /* We are big endian, BPF obj is little endian. */
  372. if (*(unsigned char const *)&endian != 1)
  373. goto mismatch;
  374. break;
  375. case ELFDATA2MSB:
  376. /* We are little endian, BPF obj is big endian. */
  377. if (*(unsigned char const *)&endian != 0)
  378. goto mismatch;
  379. break;
  380. default:
  381. return -LIBBPF_ERRNO__ENDIAN;
  382. }
  383. return 0;
  384. mismatch:
  385. pr_warning("Error: endianness mismatch.\n");
  386. return -LIBBPF_ERRNO__ENDIAN;
  387. }
/*
 * Copy the "license" section into obj->license.  The copy is capped at
 * sizeof(license) - 1 and the object was calloc'd, so the string stays
 * NUL-terminated even when the section is larger than the buffer.
 */
static int
bpf_object__init_license(struct bpf_object *obj,
			 void *data, size_t size)
{
	memcpy(obj->license, data,
	       min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}
  397. static int
  398. bpf_object__init_kversion(struct bpf_object *obj,
  399. void *data, size_t size)
  400. {
  401. u32 kver;
  402. if (size != sizeof(kver)) {
  403. pr_warning("invalid kver section in %s\n", obj->path);
  404. return -LIBBPF_ERRNO__FORMAT;
  405. }
  406. memcpy(&kver, data, sizeof(kver));
  407. obj->kern_version = kver;
  408. pr_debug("kernel version of %s is %x\n", obj->path,
  409. obj->kern_version);
  410. return 0;
  411. }
  412. static int
  413. bpf_object__init_maps(struct bpf_object *obj, void *data,
  414. size_t size)
  415. {
  416. size_t nr_maps;
  417. int i;
  418. nr_maps = size / sizeof(struct bpf_map_def);
  419. if (!data || !nr_maps) {
  420. pr_debug("%s doesn't need map definition\n",
  421. obj->path);
  422. return 0;
  423. }
  424. pr_debug("maps in %s: %zd bytes\n", obj->path, size);
  425. obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
  426. if (!obj->maps) {
  427. pr_warning("alloc maps for object failed\n");
  428. return -ENOMEM;
  429. }
  430. obj->nr_maps = nr_maps;
  431. for (i = 0; i < nr_maps; i++) {
  432. struct bpf_map_def *def = &obj->maps[i].def;
  433. /*
  434. * fill all fd with -1 so won't close incorrect
  435. * fd (fd=0 is stdin) when failure (zclose won't close
  436. * negative fd)).
  437. */
  438. obj->maps[i].fd = -1;
  439. /* Save map definition into obj->maps */
  440. *def = ((struct bpf_map_def *)data)[i];
  441. }
  442. return 0;
  443. }
  444. static void
  445. bpf_object__init_maps_name(struct bpf_object *obj, int maps_shndx)
  446. {
  447. int i;
  448. Elf_Data *symbols = obj->efile.symbols;
  449. if (!symbols || maps_shndx < 0)
  450. return;
  451. for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
  452. GElf_Sym sym;
  453. size_t map_idx;
  454. const char *map_name;
  455. if (!gelf_getsym(symbols, i, &sym))
  456. continue;
  457. if (sym.st_shndx != maps_shndx)
  458. continue;
  459. map_name = elf_strptr(obj->efile.elf,
  460. obj->efile.ehdr.e_shstrndx,
  461. sym.st_name);
  462. map_idx = sym.st_value / sizeof(struct bpf_map_def);
  463. if (map_idx >= obj->nr_maps) {
  464. pr_warning("index of map \"%s\" is buggy: %zu > %zu\n",
  465. map_name, map_idx, obj->nr_maps);
  466. continue;
  467. }
  468. obj->maps[map_idx].name = strdup(map_name);
  469. pr_debug("map %zu is \"%s\"\n", map_idx,
  470. obj->maps[map_idx].name);
  471. }
  472. }
/*
 * Walk every ELF section once and dispatch on its name/type:
 * "license" and "version" fill object metadata, "maps" builds
 * obj->maps, executable PROGBITS sections become bpf_programs, the
 * symbol table and SHT_REL sections are remembered for later passes.
 */
static int bpf_object__elf_collect(struct bpf_object *obj)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0, maps_shndx = -1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n",
			   obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		/* idx tracks the section number; used for relocation matching. */
		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section header from %s\n",
				   obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section name from %s\n",
				   obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section data from %s(%s)\n",
				   name, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}
		pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
			 name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0)
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
		else if (strcmp(name, "version") == 0)
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
		else if (strcmp(name, "maps") == 0) {
			err = bpf_object__init_maps(obj, data->d_buf,
						    data->d_size);
			/* Remember where "maps" lives for the naming pass. */
			maps_shndx = idx;
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warning("bpf: multiple SYMTAB in %s\n",
					   obj->path);
				err = -LIBBPF_ERRNO__FORMAT;
			} else
				obj->efile.symbols = data;
		} else if ((sh.sh_type == SHT_PROGBITS) &&
			   (sh.sh_flags & SHF_EXECINSTR) &&
			   (data->d_size > 0)) {
			/* Executable code section: one eBPF program. */
			err = bpf_object__add_program(obj, data->d_buf,
						      data->d_size, name, idx);
			if (err) {
				char errmsg[STRERR_BUFSIZE];

				strerror_r(-err, errmsg, sizeof(errmsg));
				pr_warning("failed to alloc program %s (%s): %s",
					   name, obj->path, errmsg);
			}
		} else if (sh.sh_type == SHT_REL) {
			/* Stash relocation sections for the second pass. */
			void *reloc = obj->efile.reloc;
			int nr_reloc = obj->efile.nr_reloc + 1;

			reloc = realloc(reloc,
					sizeof(*obj->efile.reloc) * nr_reloc);
			if (!reloc) {
				/* Old obj->efile.reloc stays valid for teardown. */
				pr_warning("realloc failed\n");
				err = -ENOMEM;
			} else {
				int n = nr_reloc - 1;

				obj->efile.reloc = reloc;
				obj->efile.nr_reloc = nr_reloc;
				obj->efile.reloc[n].shdr = sh;
				obj->efile.reloc[n].data = data;
			}
		}
		if (err)
			goto out;
	}

	/* Map names need the symtab, so resolve them after the walk. */
	if (maps_shndx >= 0)
		bpf_object__init_maps_name(obj, maps_shndx);
out:
	return err;
}
  568. static struct bpf_program *
  569. bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
  570. {
  571. struct bpf_program *prog;
  572. size_t i;
  573. for (i = 0; i < obj->nr_programs; i++) {
  574. prog = &obj->programs[i];
  575. if (prog->idx == idx)
  576. return prog;
  577. }
  578. return NULL;
  579. }
/*
 * Decode one SHT_REL section into prog->reloc_desc.  Every relocation
 * must target a BPF_LD_IMM64 instruction (a map load); the symbol's
 * st_value locates the map inside the "maps" section.
 *
 * NOTE(review): assumes shdr->sh_entsize != 0 — a corrupt ELF would
 * divide by zero here; confirm callers only pass real SHT_REL headers.
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog,
			   size_t nr_maps, GElf_Shdr *shdr,
			   Elf_Data *data, Elf_Data *symbols)
{
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* r_offset is a byte offset into the program's section. */
		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* Only ld_imm64 (64-bit immediate load) can carry a map fd. */
		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		map_idx = sym.st_value / sizeof(struct bpf_map_def);
		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d large than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
  630. static int
  631. bpf_object__create_maps(struct bpf_object *obj)
  632. {
  633. unsigned int i;
  634. for (i = 0; i < obj->nr_maps; i++) {
  635. struct bpf_map_def *def = &obj->maps[i].def;
  636. int *pfd = &obj->maps[i].fd;
  637. *pfd = bpf_create_map(def->type,
  638. def->key_size,
  639. def->value_size,
  640. def->max_entries);
  641. if (*pfd < 0) {
  642. size_t j;
  643. int err = *pfd;
  644. pr_warning("failed to create map: %s\n",
  645. strerror(errno));
  646. for (j = 0; j < i; j++)
  647. zclose(obj->maps[j].fd);
  648. return err;
  649. }
  650. pr_debug("create map: fd=%d\n", *pfd);
  651. }
  652. return 0;
  653. }
  654. static int
  655. bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
  656. {
  657. int i;
  658. if (!prog || !prog->reloc_desc)
  659. return 0;
  660. for (i = 0; i < prog->nr_reloc; i++) {
  661. int insn_idx, map_idx;
  662. struct bpf_insn *insns = prog->insns;
  663. insn_idx = prog->reloc_desc[i].insn_idx;
  664. map_idx = prog->reloc_desc[i].map_idx;
  665. if (insn_idx >= (int)prog->insns_cnt) {
  666. pr_warning("relocation out of range: '%s'\n",
  667. prog->section_name);
  668. return -LIBBPF_ERRNO__RELOC;
  669. }
  670. insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
  671. insns[insn_idx].imm = obj->maps[map_idx].fd;
  672. }
  673. zfree(&prog->reloc_desc);
  674. prog->nr_reloc = 0;
  675. return 0;
  676. }
  677. static int
  678. bpf_object__relocate(struct bpf_object *obj)
  679. {
  680. struct bpf_program *prog;
  681. size_t i;
  682. int err;
  683. for (i = 0; i < obj->nr_programs; i++) {
  684. prog = &obj->programs[i];
  685. err = bpf_program__relocate(prog, obj);
  686. if (err) {
  687. pr_warning("failed to relocate '%s'\n",
  688. prog->section_name);
  689. return err;
  690. }
  691. }
  692. return 0;
  693. }
/*
 * Second pass over the SHT_REL sections remembered by elf_collect():
 * sh_info names the section a relocation section applies to, so find
 * the program built from that section and let it record its relocs.
 * Must run while the ELF handle is still open.
 */
static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		/* sh_info = index of the section being relocated. */
		int idx = shdr->sh_info;
		struct bpf_program *prog;
		size_t nr_maps = obj->nr_maps;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no %d section\n",
				   idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog, nr_maps,
						 shdr, data,
						 obj->efile.symbols);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Load one instruction array into the kernel as a kprobe program.
 * On success *pfd receives the program fd and 0 is returned.  On
 * failure the verifier log (when available) is dumped and a libbpf
 * error code distinguishing the likely cause is returned:
 * VERIFY (non-empty log), PROG2BIG, KVER, or generic LOAD.
 */
static int
load_program(struct bpf_insn *insns, int insns_cnt,
	     char *license, u32 kern_version, int *pfd)
{
	int ret;
	char *log_buf;

	if (!insns || !insns_cnt)
		return -EINVAL;

	/* Log buffer is best-effort: loading proceeds without one. */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
			       insns_cnt, license, kern_version,
			       log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		/* Non-empty log: the in-kernel verifier rejected it. */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else {
		if (insns_cnt >= BPF_MAXINSNS) {
			pr_warning("Program too large (%d insns), at most %d insns\n",
				   insns_cnt, BPF_MAXINSNS);
			ret = -LIBBPF_ERRNO__PROG2BIG;
		} else if (log_buf) {
			/* Empty log with a valid buffer: guess kernel-version trouble. */
			pr_warning("log buffer is empty\n");
			ret = -LIBBPF_ERRNO__KVER;
		}
	}

out:
	free(log_buf);
	return ret;
}
/*
 * Load all instances of 'prog'.  Without a preprocessor there is one
 * instance; with a preprocessor, it rewrites the instructions per
 * instance and may skip some by returning no new program.
 * prog->insns is freed afterwards, on success and failure alike.
 */
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		/* Lazily create a single-instance fd array. */
		if (prog->preprocessor) {
			/* A preprocessor needs a pre-sized instance array. */
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			/* Preprocessor chose to skip this instance. */
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* Instructions are no longer needed once loaded (or failed). */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
  834. static int
  835. bpf_object__load_progs(struct bpf_object *obj)
  836. {
  837. size_t i;
  838. int err;
  839. for (i = 0; i < obj->nr_programs; i++) {
  840. err = bpf_program__load(&obj->programs[i],
  841. obj->license,
  842. obj->kern_version);
  843. if (err)
  844. return err;
  845. }
  846. return 0;
  847. }
  848. static int bpf_object__validate(struct bpf_object *obj)
  849. {
  850. if (obj->kern_version == 0) {
  851. pr_warning("%s doesn't provide kernel version\n",
  852. obj->path);
  853. return -LIBBPF_ERRNO__KVERSION;
  854. }
  855. return 0;
  856. }
/*
 * Common open path: create the object, parse the ELF image, collect
 * sections and relocations, then drop all libelf state.  On any error
 * the half-built object is closed and an ERR_PTR is returned.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* CHECK_ERR jumps to 'out' on the first failing step. */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
  880. struct bpf_object *bpf_object__open(const char *path)
  881. {
  882. /* param validation */
  883. if (!path)
  884. return NULL;
  885. pr_debug("loading %s\n", path);
  886. return __bpf_object__open(path, NULL, 0);
  887. }
  888. struct bpf_object *bpf_object__open_buffer(void *obj_buf,
  889. size_t obj_buf_sz,
  890. const char *name)
  891. {
  892. char tmp_name[64];
  893. /* param validation */
  894. if (!obj_buf || obj_buf_sz <= 0)
  895. return NULL;
  896. if (!name) {
  897. snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
  898. (unsigned long)obj_buf,
  899. (unsigned long)obj_buf_sz);
  900. tmp_name[sizeof(tmp_name) - 1] = '\0';
  901. name = tmp_name;
  902. }
  903. pr_debug("loading object '%s' from buffer\n",
  904. name);
  905. return __bpf_object__open(name, obj_buf, obj_buf_sz);
  906. }
  907. int bpf_object__unload(struct bpf_object *obj)
  908. {
  909. size_t i;
  910. if (!obj)
  911. return -EINVAL;
  912. for (i = 0; i < obj->nr_maps; i++)
  913. zclose(obj->maps[i].fd);
  914. for (i = 0; i < obj->nr_programs; i++)
  915. bpf_program__unload(&obj->programs[i]);
  916. return 0;
  917. }
/*
 * Load an opened object into the kernel: create its maps, relocate
 * program instructions against them, then load every program.
 * Returns 0 on success, a negative error otherwise (the object is
 * unloaded again on failure).
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	/* Loading is a one-shot operation per object. */
	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	/* Set up front so a failed attempt can't be retried either. */
	obj->loaded = true;

	/* CHECK_ERR() records failure in 'err' and jumps to 'out'. */
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
  937. void bpf_object__close(struct bpf_object *obj)
  938. {
  939. size_t i;
  940. if (!obj)
  941. return;
  942. bpf_object__elf_finish(obj);
  943. bpf_object__unload(obj);
  944. for (i = 0; i < obj->nr_maps; i++) {
  945. zfree(&obj->maps[i].name);
  946. if (obj->maps[i].clear_priv)
  947. obj->maps[i].clear_priv(&obj->maps[i],
  948. obj->maps[i].priv);
  949. obj->maps[i].priv = NULL;
  950. obj->maps[i].clear_priv = NULL;
  951. }
  952. zfree(&obj->maps);
  953. obj->nr_maps = 0;
  954. if (obj->programs && obj->nr_programs) {
  955. for (i = 0; i < obj->nr_programs; i++)
  956. bpf_program__exit(&obj->programs[i]);
  957. }
  958. zfree(&obj->programs);
  959. list_del(&obj->list);
  960. free(obj);
  961. }
  962. struct bpf_object *
  963. bpf_object__next(struct bpf_object *prev)
  964. {
  965. struct bpf_object *next;
  966. if (!prev)
  967. next = list_first_entry(&bpf_objects_list,
  968. struct bpf_object,
  969. list);
  970. else
  971. next = list_next_entry(prev, list);
  972. /* Empty list is noticed here so don't need checking on entry. */
  973. if (&next->list == &bpf_objects_list)
  974. return NULL;
  975. return next;
  976. }
  977. const char *
  978. bpf_object__get_name(struct bpf_object *obj)
  979. {
  980. if (!obj)
  981. return ERR_PTR(-EINVAL);
  982. return obj->path;
  983. }
  984. unsigned int
  985. bpf_object__get_kversion(struct bpf_object *obj)
  986. {
  987. if (!obj)
  988. return 0;
  989. return obj->kern_version;
  990. }
  991. struct bpf_program *
  992. bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
  993. {
  994. size_t idx;
  995. if (!obj->programs)
  996. return NULL;
  997. /* First handler */
  998. if (prev == NULL)
  999. return &obj->programs[0];
  1000. if (prev->obj != obj) {
  1001. pr_warning("error: program handler doesn't match object\n");
  1002. return NULL;
  1003. }
  1004. idx = (prev - obj->programs) + 1;
  1005. if (idx >= obj->nr_programs)
  1006. return NULL;
  1007. return &obj->programs[idx];
  1008. }
  1009. int bpf_program__set_private(struct bpf_program *prog,
  1010. void *priv,
  1011. bpf_program_clear_priv_t clear_priv)
  1012. {
  1013. if (prog->priv && prog->clear_priv)
  1014. prog->clear_priv(prog, prog->priv);
  1015. prog->priv = priv;
  1016. prog->clear_priv = clear_priv;
  1017. return 0;
  1018. }
  1019. int bpf_program__get_private(struct bpf_program *prog, void **ppriv)
  1020. {
  1021. *ppriv = prog->priv;
  1022. return 0;
  1023. }
  1024. const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
  1025. {
  1026. const char *title;
  1027. title = prog->section_name;
  1028. if (needs_copy) {
  1029. title = strdup(title);
  1030. if (!title) {
  1031. pr_warning("failed to strdup program title\n");
  1032. return ERR_PTR(-ENOMEM);
  1033. }
  1034. }
  1035. return title;
  1036. }
/* Convenience wrapper: the program's default fd is instance 0's fd. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
  1041. int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
  1042. bpf_program_prep_t prep)
  1043. {
  1044. int *instances_fds;
  1045. if (nr_instances <= 0 || !prep)
  1046. return -EINVAL;
  1047. if (prog->instances.nr > 0 || prog->instances.fds) {
  1048. pr_warning("Can't set pre-processor after loading\n");
  1049. return -EINVAL;
  1050. }
  1051. instances_fds = malloc(sizeof(int) * nr_instances);
  1052. if (!instances_fds) {
  1053. pr_warning("alloc memory failed for fds\n");
  1054. return -ENOMEM;
  1055. }
  1056. /* fill all fd with -1 */
  1057. memset(instances_fds, -1, sizeof(int) * nr_instances);
  1058. prog->instances.nr = nr_instances;
  1059. prog->instances.fds = instances_fds;
  1060. prog->preprocessor = prep;
  1061. return 0;
  1062. }
  1063. int bpf_program__nth_fd(struct bpf_program *prog, int n)
  1064. {
  1065. int fd;
  1066. if (n >= prog->instances.nr || n < 0) {
  1067. pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
  1068. n, prog->section_name, prog->instances.nr);
  1069. return -EINVAL;
  1070. }
  1071. fd = prog->instances.fds[n];
  1072. if (fd < 0) {
  1073. pr_warning("%dth instance of program '%s' is invalid\n",
  1074. n, prog->section_name);
  1075. return -ENOENT;
  1076. }
  1077. return fd;
  1078. }
  1079. int bpf_map__get_fd(struct bpf_map *map)
  1080. {
  1081. if (!map)
  1082. return -EINVAL;
  1083. return map->fd;
  1084. }
  1085. int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef)
  1086. {
  1087. if (!map || !pdef)
  1088. return -EINVAL;
  1089. *pdef = map->def;
  1090. return 0;
  1091. }
  1092. const char *bpf_map__get_name(struct bpf_map *map)
  1093. {
  1094. if (!map)
  1095. return NULL;
  1096. return map->name;
  1097. }
  1098. int bpf_map__set_private(struct bpf_map *map, void *priv,
  1099. bpf_map_clear_priv_t clear_priv)
  1100. {
  1101. if (!map)
  1102. return -EINVAL;
  1103. if (map->priv) {
  1104. if (map->clear_priv)
  1105. map->clear_priv(map, map->priv);
  1106. }
  1107. map->priv = priv;
  1108. map->clear_priv = clear_priv;
  1109. return 0;
  1110. }
  1111. int bpf_map__get_private(struct bpf_map *map, void **ppriv)
  1112. {
  1113. if (!map)
  1114. return -EINVAL;
  1115. if (ppriv)
  1116. *ppriv = map->priv;
  1117. return 0;
  1118. }
  1119. struct bpf_map *
  1120. bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
  1121. {
  1122. size_t idx;
  1123. struct bpf_map *s, *e;
  1124. if (!obj || !obj->maps)
  1125. return NULL;
  1126. s = obj->maps;
  1127. e = obj->maps + obj->nr_maps;
  1128. if (prev == NULL)
  1129. return s;
  1130. if ((prev < s) || (prev >= e)) {
  1131. pr_warning("error in %s: map handler doesn't belong to object\n",
  1132. __func__);
  1133. return NULL;
  1134. }
  1135. idx = (prev - obj->maps) + 1;
  1136. if (idx >= obj->nr_maps)
  1137. return NULL;
  1138. return &obj->maps[idx];
  1139. }
  1140. struct bpf_map *
  1141. bpf_object__get_map_by_name(struct bpf_object *obj, const char *name)
  1142. {
  1143. struct bpf_map *pos;
  1144. bpf_map__for_each(pos, obj) {
  1145. if (strcmp(pos->name, name) == 0)
  1146. return pos;
  1147. }
  1148. return NULL;
  1149. }