libbpf.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464
  1. // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
  2. /*
  3. * Common eBPF ELF object loading operations.
  4. *
  5. * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  6. * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  7. * Copyright (C) 2015 Huawei Inc.
  8. * Copyright (C) 2017 Nicira, Inc.
  9. */
  10. #define _GNU_SOURCE
  11. #include <stdlib.h>
  12. #include <stdio.h>
  13. #include <stdarg.h>
  14. #include <libgen.h>
  15. #include <inttypes.h>
  16. #include <string.h>
  17. #include <unistd.h>
  18. #include <fcntl.h>
  19. #include <errno.h>
  20. #include <asm/unistd.h>
  21. #include <linux/err.h>
  22. #include <linux/kernel.h>
  23. #include <linux/bpf.h>
  24. #include <linux/btf.h>
  25. #include <linux/list.h>
  26. #include <linux/limits.h>
  27. #include <linux/perf_event.h>
  28. #include <linux/ring_buffer.h>
  29. #include <sys/stat.h>
  30. #include <sys/types.h>
  31. #include <sys/vfs.h>
  32. #include <tools/libc_compat.h>
  33. #include <libelf.h>
  34. #include <gelf.h>
  35. #include "libbpf.h"
  36. #include "bpf.h"
  37. #include "btf.h"
  38. #include "str_error.h"
  39. #ifndef EM_BPF
  40. #define EM_BPF 247
  41. #endif
  42. #ifndef BPF_FS_MAGIC
  43. #define BPF_FS_MAGIC 0xcafe4a11
  44. #endif
  45. #define __printf(a, b) __attribute__((format(printf, a, b)))
  46. __printf(1, 2)
  47. static int __base_pr(const char *format, ...)
  48. {
  49. va_list args;
  50. int err;
  51. va_start(args, format);
  52. err = vfprintf(stderr, format, args);
  53. va_end(args);
  54. return err;
  55. }
/*
 * Per-level print callbacks.  warning and info default to stderr via
 * __base_pr; debug defaults to NULL, i.e. debug output is off until a
 * callback is installed with libbpf_set_print().
 */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Print through 'func' only when a callback is set; prefix "libbpf: ". */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
  67. void libbpf_set_print(libbpf_print_fn_t warn,
  68. libbpf_print_fn_t info,
  69. libbpf_print_fn_t debug)
  70. {
  71. __pr_warning = warn;
  72. __pr_info = info;
  73. __pr_debug = debug;
  74. }
/* Scratch-buffer size for libbpf_strerror_r() message formatting. */
#define STRERR_BUFSIZE  128

/* Run 'action', store its result in 'err', jump to 'out' on failure. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)

/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* free() the pointee and NULL the pointer, guarding against double free. */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/* close() fd only when valid (>= 0), then mark it -1; yields close()'s rc. */
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
/* Fall back to plain reads when libelf lacks mmap support. */
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	/* Program name: symbol resolved in bpf_object__init_prog_names(). */
	char *name;
	int prog_ifindex;
	/* Name of the ELF section this program was read from (strdup'd). */
	char *section_name;
	/* Private copy of the section's raw instructions. */
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* One descriptor per relocation collected for this program. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* map reference */
			RELO_CALL,	/* reference into .text */
		} type;
		int insn_idx;		/* instruction the relocation applies to */
		union {
			int map_idx;	/* RELO_LD64: index into obj->maps */
			int text_off;	/* RELO_CALL: offset within .text */
		};
	} *reloc_desc;
	int nr_reloc;

	/* Loaded instance fds; nr == -1 means the program was never loaded. */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	/* Back pointer to the owning bpf_object. */
	struct bpf_object *obj;
	/* Opaque caller data and its destructor. */
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
};
struct bpf_map {
	int fd;				/* kernel map fd; initialized to -1 */
	char *name;			/* map symbol name (strdup'd) */
	size_t offset;			/* symbol offset within the "maps" section */
	int map_ifindex;
	struct bpf_map_def def;		/* definition copied from the ELF data */
	__u32 btf_key_type_id;		/* BTF type ids for key/value */
	__u32 btf_value_type_id;
	void *priv;			/* opaque caller data and its destructor */
	bpf_map_clear_priv_t clear_priv;
};
/* Global registry of every opened bpf_object (see 'list' member below). */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];		/* NUL-terminated license string */
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-owned in-memory ELF image */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* SYMTAB section data */
		size_t strtabidx;	/* string-table index for symbol names */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;		/* collected SHT_REL sections */
		int nr_reloc;
		int maps_shndx;		/* "maps" section index, -1 if absent */
		int text_shndx;		/* ".text" section index */
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;		/* parsed .BTF section, NULL if none/bad */

	void *priv;			/* opaque caller data and its destructor */
	bpf_object_clear_priv_t clear_priv;

	char path[];			/* object path (flexible array member) */
};
/* ELF state is usable only while efile.elf is set. */
#define obj_elf_valid(o)	((o)->efile.elf)
  186. void bpf_program__unload(struct bpf_program *prog)
  187. {
  188. int i;
  189. if (!prog)
  190. return;
  191. /*
  192. * If the object is opened but the program was never loaded,
  193. * it is possible that prog->instances.nr == -1.
  194. */
  195. if (prog->instances.nr > 0) {
  196. for (i = 0; i < prog->instances.nr; i++)
  197. zclose(prog->instances.fds[i]);
  198. } else if (prog->instances.nr != -1) {
  199. pr_warning("Internal error: instances.nr is %d\n",
  200. prog->instances.nr);
  201. }
  202. prog->instances.nr = -1;
  203. zfree(&prog->instances.fds);
  204. }
  205. static void bpf_program__exit(struct bpf_program *prog)
  206. {
  207. if (!prog)
  208. return;
  209. if (prog->clear_priv)
  210. prog->clear_priv(prog, prog->priv);
  211. prog->priv = NULL;
  212. prog->clear_priv = NULL;
  213. bpf_program__unload(prog);
  214. zfree(&prog->name);
  215. zfree(&prog->section_name);
  216. zfree(&prog->insns);
  217. zfree(&prog->reloc_desc);
  218. prog->nr_reloc = 0;
  219. prog->insns_cnt = 0;
  220. prog->idx = -1;
  221. }
  222. static int
  223. bpf_program__init(void *data, size_t size, char *section_name, int idx,
  224. struct bpf_program *prog)
  225. {
  226. if (size < sizeof(struct bpf_insn)) {
  227. pr_warning("corrupted section '%s'\n", section_name);
  228. return -EINVAL;
  229. }
  230. bzero(prog, sizeof(*prog));
  231. prog->section_name = strdup(section_name);
  232. if (!prog->section_name) {
  233. pr_warning("failed to alloc name for prog under section(%d) %s\n",
  234. idx, section_name);
  235. goto errout;
  236. }
  237. prog->insns = malloc(size);
  238. if (!prog->insns) {
  239. pr_warning("failed to alloc insns for prog under section %s\n",
  240. section_name);
  241. goto errout;
  242. }
  243. prog->insns_cnt = size / sizeof(struct bpf_insn);
  244. memcpy(prog->insns, data,
  245. prog->insns_cnt * sizeof(struct bpf_insn));
  246. prog->idx = idx;
  247. prog->instances.fds = NULL;
  248. prog->instances.nr = -1;
  249. prog->type = BPF_PROG_TYPE_KPROBE;
  250. return 0;
  251. errout:
  252. bpf_program__exit(prog);
  253. return -ENOMEM;
  254. }
  255. static int
  256. bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
  257. char *section_name, int idx)
  258. {
  259. struct bpf_program prog, *progs;
  260. int nr_progs, err;
  261. err = bpf_program__init(data, size, section_name, idx, &prog);
  262. if (err)
  263. return err;
  264. progs = obj->programs;
  265. nr_progs = obj->nr_programs;
  266. progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
  267. if (!progs) {
  268. /*
  269. * In this case the original obj->programs
  270. * is still valid, so don't need special treat for
  271. * bpf_close_object().
  272. */
  273. pr_warning("failed to alloc a new program under section '%s'\n",
  274. section_name);
  275. bpf_program__exit(&prog);
  276. return -ENOMEM;
  277. }
  278. pr_debug("found program %s\n", prog.section_name);
  279. obj->programs = progs;
  280. obj->nr_programs = nr_progs + 1;
  281. prog.obj = obj;
  282. progs[nr_progs] = prog;
  283. return 0;
  284. }
/*
 * Give every program a name: the first GLOBAL symbol found in its
 * section, falling back to ".text" for the text section when no symbol
 * matched.  Returns 0, or a negative libbpf/errno code on failure.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* Scan the symtab for a GLOBAL symbol in prog's section. */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
  328. static struct bpf_object *bpf_object__new(const char *path,
  329. void *obj_buf,
  330. size_t obj_buf_sz)
  331. {
  332. struct bpf_object *obj;
  333. obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
  334. if (!obj) {
  335. pr_warning("alloc memory failed for %s\n", path);
  336. return ERR_PTR(-ENOMEM);
  337. }
  338. strcpy(obj->path, path);
  339. obj->efile.fd = -1;
  340. /*
  341. * Caller of this function should also calls
  342. * bpf_object__elf_finish() after data collection to return
  343. * obj_buf to user. If not, we should duplicate the buffer to
  344. * avoid user freeing them before elf finish.
  345. */
  346. obj->efile.obj_buf = obj_buf;
  347. obj->efile.obj_buf_sz = obj_buf_sz;
  348. obj->efile.maps_shndx = -1;
  349. obj->loaded = false;
  350. INIT_LIST_HEAD(&obj->list);
  351. list_add(&obj->list, &bpf_objects_list);
  352. return obj;
  353. }
  354. static void bpf_object__elf_finish(struct bpf_object *obj)
  355. {
  356. if (!obj_elf_valid(obj))
  357. return;
  358. if (obj->efile.elf) {
  359. elf_end(obj->efile.elf);
  360. obj->efile.elf = NULL;
  361. }
  362. obj->efile.symbols = NULL;
  363. zfree(&obj->efile.reloc);
  364. obj->efile.nr_reloc = 0;
  365. zclose(obj->efile.fd);
  366. obj->efile.obj_buf = NULL;
  367. obj->efile.obj_buf_sz = 0;
  368. }
  369. static int bpf_object__elf_init(struct bpf_object *obj)
  370. {
  371. int err = 0;
  372. GElf_Ehdr *ep;
  373. if (obj_elf_valid(obj)) {
  374. pr_warning("elf init: internal error\n");
  375. return -LIBBPF_ERRNO__LIBELF;
  376. }
  377. if (obj->efile.obj_buf_sz > 0) {
  378. /*
  379. * obj_buf should have been validated by
  380. * bpf_object__open_buffer().
  381. */
  382. obj->efile.elf = elf_memory(obj->efile.obj_buf,
  383. obj->efile.obj_buf_sz);
  384. } else {
  385. obj->efile.fd = open(obj->path, O_RDONLY);
  386. if (obj->efile.fd < 0) {
  387. char errmsg[STRERR_BUFSIZE];
  388. char *cp = libbpf_strerror_r(errno, errmsg,
  389. sizeof(errmsg));
  390. pr_warning("failed to open %s: %s\n", obj->path, cp);
  391. return -errno;
  392. }
  393. obj->efile.elf = elf_begin(obj->efile.fd,
  394. LIBBPF_ELF_C_READ_MMAP,
  395. NULL);
  396. }
  397. if (!obj->efile.elf) {
  398. pr_warning("failed to open %s as ELF file\n",
  399. obj->path);
  400. err = -LIBBPF_ERRNO__LIBELF;
  401. goto errout;
  402. }
  403. if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
  404. pr_warning("failed to get EHDR from %s\n",
  405. obj->path);
  406. err = -LIBBPF_ERRNO__FORMAT;
  407. goto errout;
  408. }
  409. ep = &obj->efile.ehdr;
  410. /* Old LLVM set e_machine to EM_NONE */
  411. if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
  412. pr_warning("%s is not an eBPF object file\n",
  413. obj->path);
  414. err = -LIBBPF_ERRNO__FORMAT;
  415. goto errout;
  416. }
  417. return 0;
  418. errout:
  419. bpf_object__elf_finish(obj);
  420. return err;
  421. }
  422. static int
  423. bpf_object__check_endianness(struct bpf_object *obj)
  424. {
  425. static unsigned int const endian = 1;
  426. switch (obj->efile.ehdr.e_ident[EI_DATA]) {
  427. case ELFDATA2LSB:
  428. /* We are big endian, BPF obj is little endian. */
  429. if (*(unsigned char const *)&endian != 1)
  430. goto mismatch;
  431. break;
  432. case ELFDATA2MSB:
  433. /* We are little endian, BPF obj is big endian. */
  434. if (*(unsigned char const *)&endian != 0)
  435. goto mismatch;
  436. break;
  437. default:
  438. return -LIBBPF_ERRNO__ENDIAN;
  439. }
  440. return 0;
  441. mismatch:
  442. pr_warning("Error: endianness mismatch.\n");
  443. return -LIBBPF_ERRNO__ENDIAN;
  444. }
  445. static int
  446. bpf_object__init_license(struct bpf_object *obj,
  447. void *data, size_t size)
  448. {
  449. memcpy(obj->license, data,
  450. min(size, sizeof(obj->license) - 1));
  451. pr_debug("license of %s is %s\n", obj->path, obj->license);
  452. return 0;
  453. }
  454. static int
  455. bpf_object__init_kversion(struct bpf_object *obj,
  456. void *data, size_t size)
  457. {
  458. __u32 kver;
  459. if (size != sizeof(kver)) {
  460. pr_warning("invalid kver section in %s\n", obj->path);
  461. return -LIBBPF_ERRNO__FORMAT;
  462. }
  463. memcpy(&kver, data, sizeof(kver));
  464. obj->kern_version = kver;
  465. pr_debug("kernel version of %s is %x\n", obj->path,
  466. obj->kern_version);
  467. return 0;
  468. }
  469. static int compare_bpf_map(const void *_a, const void *_b)
  470. {
  471. const struct bpf_map *a = _a;
  472. const struct bpf_map *b = _b;
  473. return a->offset - b->offset;
  474. }
  475. static int
  476. bpf_object__init_maps(struct bpf_object *obj, int flags)
  477. {
  478. bool strict = !(flags & MAPS_RELAX_COMPAT);
  479. int i, map_idx, map_def_sz, nr_maps = 0;
  480. Elf_Scn *scn;
  481. Elf_Data *data;
  482. Elf_Data *symbols = obj->efile.symbols;
  483. if (obj->efile.maps_shndx < 0)
  484. return -EINVAL;
  485. if (!symbols)
  486. return -EINVAL;
  487. scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
  488. if (scn)
  489. data = elf_getdata(scn, NULL);
  490. if (!scn || !data) {
  491. pr_warning("failed to get Elf_Data from map section %d\n",
  492. obj->efile.maps_shndx);
  493. return -EINVAL;
  494. }
  495. /*
  496. * Count number of maps. Each map has a name.
  497. * Array of maps is not supported: only the first element is
  498. * considered.
  499. *
  500. * TODO: Detect array of map and report error.
  501. */
  502. for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
  503. GElf_Sym sym;
  504. if (!gelf_getsym(symbols, i, &sym))
  505. continue;
  506. if (sym.st_shndx != obj->efile.maps_shndx)
  507. continue;
  508. nr_maps++;
  509. }
  510. /* Alloc obj->maps and fill nr_maps. */
  511. pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
  512. nr_maps, data->d_size);
  513. if (!nr_maps)
  514. return 0;
  515. /* Assume equally sized map definitions */
  516. map_def_sz = data->d_size / nr_maps;
  517. if (!data->d_size || (data->d_size % nr_maps) != 0) {
  518. pr_warning("unable to determine map definition size "
  519. "section %s, %d maps in %zd bytes\n",
  520. obj->path, nr_maps, data->d_size);
  521. return -EINVAL;
  522. }
  523. obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
  524. if (!obj->maps) {
  525. pr_warning("alloc maps for object failed\n");
  526. return -ENOMEM;
  527. }
  528. obj->nr_maps = nr_maps;
  529. /*
  530. * fill all fd with -1 so won't close incorrect
  531. * fd (fd=0 is stdin) when failure (zclose won't close
  532. * negative fd)).
  533. */
  534. for (i = 0; i < nr_maps; i++)
  535. obj->maps[i].fd = -1;
  536. /*
  537. * Fill obj->maps using data in "maps" section.
  538. */
  539. for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
  540. GElf_Sym sym;
  541. const char *map_name;
  542. struct bpf_map_def *def;
  543. if (!gelf_getsym(symbols, i, &sym))
  544. continue;
  545. if (sym.st_shndx != obj->efile.maps_shndx)
  546. continue;
  547. map_name = elf_strptr(obj->efile.elf,
  548. obj->efile.strtabidx,
  549. sym.st_name);
  550. obj->maps[map_idx].offset = sym.st_value;
  551. if (sym.st_value + map_def_sz > data->d_size) {
  552. pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
  553. obj->path, map_name);
  554. return -EINVAL;
  555. }
  556. obj->maps[map_idx].name = strdup(map_name);
  557. if (!obj->maps[map_idx].name) {
  558. pr_warning("failed to alloc map name\n");
  559. return -ENOMEM;
  560. }
  561. pr_debug("map %d is \"%s\"\n", map_idx,
  562. obj->maps[map_idx].name);
  563. def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
  564. /*
  565. * If the definition of the map in the object file fits in
  566. * bpf_map_def, copy it. Any extra fields in our version
  567. * of bpf_map_def will default to zero as a result of the
  568. * calloc above.
  569. */
  570. if (map_def_sz <= sizeof(struct bpf_map_def)) {
  571. memcpy(&obj->maps[map_idx].def, def, map_def_sz);
  572. } else {
  573. /*
  574. * Here the map structure being read is bigger than what
  575. * we expect, truncate if the excess bits are all zero.
  576. * If they are not zero, reject this map as
  577. * incompatible.
  578. */
  579. char *b;
  580. for (b = ((char *)def) + sizeof(struct bpf_map_def);
  581. b < ((char *)def) + map_def_sz; b++) {
  582. if (*b != 0) {
  583. pr_warning("maps section in %s: \"%s\" "
  584. "has unrecognized, non-zero "
  585. "options\n",
  586. obj->path, map_name);
  587. if (strict)
  588. return -EINVAL;
  589. }
  590. }
  591. memcpy(&obj->maps[map_idx].def, def,
  592. sizeof(struct bpf_map_def));
  593. }
  594. map_idx++;
  595. }
  596. qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
  597. return 0;
  598. }
  599. static bool section_have_execinstr(struct bpf_object *obj, int idx)
  600. {
  601. Elf_Scn *scn;
  602. GElf_Shdr sh;
  603. scn = elf_getscn(obj->efile.elf, idx);
  604. if (!scn)
  605. return false;
  606. if (gelf_getshdr(scn, &sh) != &sh)
  607. return false;
  608. if (sh.sh_flags & SHF_EXECINSTR)
  609. return true;
  610. return false;
  611. }
  612. static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
  613. {
  614. Elf *elf = obj->efile.elf;
  615. GElf_Ehdr *ep = &obj->efile.ehdr;
  616. Elf_Scn *scn = NULL;
  617. int idx = 0, err = 0;
  618. /* Elf is corrupted/truncated, avoid calling elf_strptr. */
  619. if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
  620. pr_warning("failed to get e_shstrndx from %s\n",
  621. obj->path);
  622. return -LIBBPF_ERRNO__FORMAT;
  623. }
  624. while ((scn = elf_nextscn(elf, scn)) != NULL) {
  625. char *name;
  626. GElf_Shdr sh;
  627. Elf_Data *data;
  628. idx++;
  629. if (gelf_getshdr(scn, &sh) != &sh) {
  630. pr_warning("failed to get section(%d) header from %s\n",
  631. idx, obj->path);
  632. err = -LIBBPF_ERRNO__FORMAT;
  633. goto out;
  634. }
  635. name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
  636. if (!name) {
  637. pr_warning("failed to get section(%d) name from %s\n",
  638. idx, obj->path);
  639. err = -LIBBPF_ERRNO__FORMAT;
  640. goto out;
  641. }
  642. data = elf_getdata(scn, 0);
  643. if (!data) {
  644. pr_warning("failed to get section(%d) data from %s(%s)\n",
  645. idx, name, obj->path);
  646. err = -LIBBPF_ERRNO__FORMAT;
  647. goto out;
  648. }
  649. pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
  650. idx, name, (unsigned long)data->d_size,
  651. (int)sh.sh_link, (unsigned long)sh.sh_flags,
  652. (int)sh.sh_type);
  653. if (strcmp(name, "license") == 0)
  654. err = bpf_object__init_license(obj,
  655. data->d_buf,
  656. data->d_size);
  657. else if (strcmp(name, "version") == 0)
  658. err = bpf_object__init_kversion(obj,
  659. data->d_buf,
  660. data->d_size);
  661. else if (strcmp(name, "maps") == 0)
  662. obj->efile.maps_shndx = idx;
  663. else if (strcmp(name, BTF_ELF_SEC) == 0) {
  664. obj->btf = btf__new(data->d_buf, data->d_size,
  665. __pr_debug);
  666. if (IS_ERR(obj->btf)) {
  667. pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
  668. BTF_ELF_SEC, PTR_ERR(obj->btf));
  669. obj->btf = NULL;
  670. }
  671. } else if (sh.sh_type == SHT_SYMTAB) {
  672. if (obj->efile.symbols) {
  673. pr_warning("bpf: multiple SYMTAB in %s\n",
  674. obj->path);
  675. err = -LIBBPF_ERRNO__FORMAT;
  676. } else {
  677. obj->efile.symbols = data;
  678. obj->efile.strtabidx = sh.sh_link;
  679. }
  680. } else if ((sh.sh_type == SHT_PROGBITS) &&
  681. (sh.sh_flags & SHF_EXECINSTR) &&
  682. (data->d_size > 0)) {
  683. if (strcmp(name, ".text") == 0)
  684. obj->efile.text_shndx = idx;
  685. err = bpf_object__add_program(obj, data->d_buf,
  686. data->d_size, name, idx);
  687. if (err) {
  688. char errmsg[STRERR_BUFSIZE];
  689. char *cp = libbpf_strerror_r(-err, errmsg,
  690. sizeof(errmsg));
  691. pr_warning("failed to alloc program %s (%s): %s",
  692. name, obj->path, cp);
  693. }
  694. } else if (sh.sh_type == SHT_REL) {
  695. void *reloc = obj->efile.reloc;
  696. int nr_reloc = obj->efile.nr_reloc + 1;
  697. int sec = sh.sh_info; /* points to other section */
  698. /* Only do relo for section with exec instructions */
  699. if (!section_have_execinstr(obj, sec)) {
  700. pr_debug("skip relo %s(%d) for section(%d)\n",
  701. name, idx, sec);
  702. continue;
  703. }
  704. reloc = reallocarray(reloc, nr_reloc,
  705. sizeof(*obj->efile.reloc));
  706. if (!reloc) {
  707. pr_warning("realloc failed\n");
  708. err = -ENOMEM;
  709. } else {
  710. int n = nr_reloc - 1;
  711. obj->efile.reloc = reloc;
  712. obj->efile.nr_reloc = nr_reloc;
  713. obj->efile.reloc[n].shdr = sh;
  714. obj->efile.reloc[n].data = data;
  715. }
  716. } else {
  717. pr_debug("skip section(%d) %s\n", idx, name);
  718. }
  719. if (err)
  720. goto out;
  721. }
  722. if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
  723. pr_warning("Corrupted ELF file: index of strtab invalid\n");
  724. return LIBBPF_ERRNO__FORMAT;
  725. }
  726. if (obj->efile.maps_shndx >= 0) {
  727. err = bpf_object__init_maps(obj, flags);
  728. if (err)
  729. goto out;
  730. }
  731. err = bpf_object__init_prog_names(obj);
  732. out:
  733. return err;
  734. }
  735. static struct bpf_program *
  736. bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
  737. {
  738. struct bpf_program *prog;
  739. size_t i;
  740. for (i = 0; i < obj->nr_programs; i++) {
  741. prog = &obj->programs[i];
  742. if (prog->idx == idx)
  743. return prog;
  744. }
  745. return NULL;
  746. }
  747. struct bpf_program *
  748. bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
  749. {
  750. struct bpf_program *pos;
  751. bpf_object__for_each_program(pos, obj) {
  752. if (pos->section_name && !strcmp(pos->section_name, title))
  753. return pos;
  754. }
  755. return NULL;
  756. }
/*
 * Parse one SHT_REL section targeting @prog and record one reloc_desc
 * per relocation entry.  Two relocation kinds are recognized:
 *  - RELO_CALL: a BPF_CALL insn with src_reg == BPF_PSEUDO_CALL, i.e. a
 *    bpf-to-bpf call whose target lives in .text;
 *  - RELO_LD64: a BPF_LD_IMM64 insn referencing a map (matched by the
 *    symbol's offset into the maps section).
 * Returns 0 on success, -ENOMEM or a negative LIBBPF_ERRNO__* code.
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* Resolve the symbol this relocation refers to. */
		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		/* Only symbols in the maps section or .text are valid. */
		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* r_offset is a byte offset into the program's insns. */
		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			/* bpf-to-bpf call: record the callee's .text offset. */
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		/* Everything else must be a 64-bit map-load immediate. */
		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d large than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
  838. static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
  839. {
  840. const struct btf_type *container_type;
  841. const struct btf_member *key, *value;
  842. struct bpf_map_def *def = &map->def;
  843. const size_t max_name = 256;
  844. char container_name[max_name];
  845. __s64 key_size, value_size;
  846. __s32 container_id;
  847. if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
  848. max_name) {
  849. pr_warning("map:%s length of '____btf_map_%s' is too long\n",
  850. map->name, map->name);
  851. return -EINVAL;
  852. }
  853. container_id = btf__find_by_name(btf, container_name);
  854. if (container_id < 0) {
  855. pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
  856. map->name, container_name);
  857. return container_id;
  858. }
  859. container_type = btf__type_by_id(btf, container_id);
  860. if (!container_type) {
  861. pr_warning("map:%s cannot find BTF type for container_id:%u\n",
  862. map->name, container_id);
  863. return -EINVAL;
  864. }
  865. if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
  866. BTF_INFO_VLEN(container_type->info) < 2) {
  867. pr_warning("map:%s container_name:%s is an invalid container struct\n",
  868. map->name, container_name);
  869. return -EINVAL;
  870. }
  871. key = (struct btf_member *)(container_type + 1);
  872. value = key + 1;
  873. key_size = btf__resolve_size(btf, key->type);
  874. if (key_size < 0) {
  875. pr_warning("map:%s invalid BTF key_type_size\n",
  876. map->name);
  877. return key_size;
  878. }
  879. if (def->key_size != key_size) {
  880. pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
  881. map->name, (__u32)key_size, def->key_size);
  882. return -EINVAL;
  883. }
  884. value_size = btf__resolve_size(btf, value->type);
  885. if (value_size < 0) {
  886. pr_warning("map:%s invalid BTF value_type_size\n", map->name);
  887. return value_size;
  888. }
  889. if (def->value_size != value_size) {
  890. pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
  891. map->name, (__u32)value_size, def->value_size);
  892. return -EINVAL;
  893. }
  894. map->btf_key_type_id = key->type;
  895. map->btf_value_type_id = value->type;
  896. return 0;
  897. }
/*
 * Replace @map's fd with a duplicate of @fd (an fd of an already-created
 * kernel map) and refresh the cached definition from the kernel's view.
 * Lets multiple objects share one kernel map.  Returns 0 or negative error.
 */
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int new_fd, err;
	char *new_name;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;

	new_name = strdup(info.name);
	if (!new_name)
		return -errno;

	/*
	 * Reserve a fresh fd number first, then retarget it at @fd with
	 * dup3() so the resulting descriptor keeps O_CLOEXEC set.
	 */
	new_fd = open("/", O_RDONLY | O_CLOEXEC);
	if (new_fd < 0)
		goto err_free_new_name;

	new_fd = dup3(fd, new_fd, O_CLOEXEC);
	if (new_fd < 0)
		goto err_close_new_fd;

	err = zclose(map->fd);
	if (err)
		goto err_close_new_fd;
	free(map->name);

	map->fd = new_fd;
	map->name = new_name;
	map->def.type = info.type;
	map->def.key_size = info.key_size;
	map->def.value_size = info.value_size;
	map->def.max_entries = info.max_entries;
	map->def.map_flags = info.map_flags;
	map->btf_key_type_id = info.btf_key_type_id;
	map->btf_value_type_id = info.btf_value_type_id;

	return 0;

err_close_new_fd:
	close(new_fd);
err_free_new_name:
	free(new_name);
	/*
	 * NOTE(review): close()/free() above may clobber errno before it is
	 * read here — consider capturing errno at the point of failure.
	 */
	return -errno;
}
/*
 * Create one kernel map per obj->maps[] entry via BPF_MAP_CREATE.
 * Maps with a preset fd (see bpf_map__reuse_fd()) are skipped.  BTF
 * key/value type ids are attached when available; if the kernel rejects
 * them the create is retried once without BTF.  On failure, every map
 * created so far is closed before returning.
 */
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_create_map_attr create_attr = {};
	unsigned int i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];
		struct bpf_map_def *def = &map->def;
		char *cp, errmsg[STRERR_BUFSIZE];
		int *pfd = &map->fd;

		if (map->fd >= 0) {
			/* fd preset by bpf_map__reuse_fd(); nothing to do. */
			pr_debug("skip map create (preset) %s: fd=%d\n",
				 map->name, map->fd);
			continue;
		}

		create_attr.name = map->name;
		create_attr.map_ifindex = map->map_ifindex;
		create_attr.map_type = def->type;
		create_attr.map_flags = def->map_flags;
		create_attr.key_size = def->key_size;
		create_attr.value_size = def->value_size;
		create_attr.max_entries = def->max_entries;
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;

		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
			create_attr.btf_fd = btf__fd(obj->btf);
			create_attr.btf_key_type_id = map->btf_key_type_id;
			create_attr.btf_value_type_id = map->btf_value_type_id;
		}

		*pfd = bpf_create_map_xattr(&create_attr);
		if (*pfd < 0 && create_attr.btf_key_type_id) {
			/* Kernel may lack BTF support: retry without it. */
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
				   map->name, cp, errno);
			create_attr.btf_fd = 0;
			create_attr.btf_key_type_id = 0;
			create_attr.btf_value_type_id = 0;
			map->btf_key_type_id = 0;
			map->btf_value_type_id = 0;
			*pfd = bpf_create_map_xattr(&create_attr);
		}

		if (*pfd < 0) {
			size_t j;

			/*
			 * NOTE(review): this propagates the raw return value
			 * of bpf_create_map_xattr() (-1), not -errno —
			 * callers cannot distinguish failure causes.
			 */
			err = *pfd;
			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
			pr_warning("failed to create map (name: '%s'): %s\n",
				   map->name, cp);
			/* Unwind: close every map created so far. */
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}
		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
	}

	return 0;
}
/*
 * Resolve one RELO_CALL relocation in @prog: on first use, append the
 * whole .text section (all subprograms) after @prog's own instructions,
 * then rewrite the call's imm field into an offset relative to the call
 * instruction inside the combined image.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* .text itself must not carry call relocations into .text. */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (prog->main_prog_cnt == 0) {
		/* First call relo for this prog: splice .text in once. */
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		/* Remember where the .text copy begins in this program. */
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}

	insn = &prog->insns[relo->insn_idx];
	/*
	 * imm currently holds the callee's insn offset within .text;
	 * convert it to a pc-relative offset from the call site.
	 */
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
  1032. static int
  1033. bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
  1034. {
  1035. int i, err;
  1036. if (!prog || !prog->reloc_desc)
  1037. return 0;
  1038. for (i = 0; i < prog->nr_reloc; i++) {
  1039. if (prog->reloc_desc[i].type == RELO_LD64) {
  1040. struct bpf_insn *insns = prog->insns;
  1041. int insn_idx, map_idx;
  1042. insn_idx = prog->reloc_desc[i].insn_idx;
  1043. map_idx = prog->reloc_desc[i].map_idx;
  1044. if (insn_idx >= (int)prog->insns_cnt) {
  1045. pr_warning("relocation out of range: '%s'\n",
  1046. prog->section_name);
  1047. return -LIBBPF_ERRNO__RELOC;
  1048. }
  1049. insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
  1050. insns[insn_idx].imm = obj->maps[map_idx].fd;
  1051. } else {
  1052. err = bpf_program__reloc_text(prog, obj,
  1053. &prog->reloc_desc[i]);
  1054. if (err)
  1055. return err;
  1056. }
  1057. }
  1058. zfree(&prog->reloc_desc);
  1059. prog->nr_reloc = 0;
  1060. return 0;
  1061. }
  1062. static int
  1063. bpf_object__relocate(struct bpf_object *obj)
  1064. {
  1065. struct bpf_program *prog;
  1066. size_t i;
  1067. int err;
  1068. for (i = 0; i < obj->nr_programs; i++) {
  1069. prog = &obj->programs[i];
  1070. err = bpf_program__relocate(prog, obj);
  1071. if (err) {
  1072. pr_warning("failed to relocate '%s'\n",
  1073. prog->section_name);
  1074. return err;
  1075. }
  1076. }
  1077. return 0;
  1078. }
  1079. static int bpf_object__collect_reloc(struct bpf_object *obj)
  1080. {
  1081. int i, err;
  1082. if (!obj_elf_valid(obj)) {
  1083. pr_warning("Internal error: elf object is closed\n");
  1084. return -LIBBPF_ERRNO__INTERNAL;
  1085. }
  1086. for (i = 0; i < obj->efile.nr_reloc; i++) {
  1087. GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
  1088. Elf_Data *data = obj->efile.reloc[i].data;
  1089. int idx = shdr->sh_info;
  1090. struct bpf_program *prog;
  1091. if (shdr->sh_type != SHT_REL) {
  1092. pr_warning("internal error at %d\n", __LINE__);
  1093. return -LIBBPF_ERRNO__INTERNAL;
  1094. }
  1095. prog = bpf_object__find_prog_by_idx(obj, idx);
  1096. if (!prog) {
  1097. pr_warning("relocation failed: no section(%d)\n", idx);
  1098. return -LIBBPF_ERRNO__RELOC;
  1099. }
  1100. err = bpf_program__collect_reloc(prog,
  1101. shdr, data,
  1102. obj);
  1103. if (err)
  1104. return err;
  1105. }
  1106. return 0;
  1107. }
/*
 * Load a single BPF program image into the kernel.  On success *pfd gets
 * the program fd and 0 is returned.  On failure the error is classified
 * into a LIBBPF_ERRNO__* code (verifier reject, too big, wrong program
 * type, kernel-version mismatch) to help callers diagnose the cause.
 */
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd, int prog_ifindex)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog_ifindex;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* Log buffer is best-effort: the load proceeds without it on OOM. */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	/* Load failed: classify the error for the caller. */
	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* Non-empty verifier log => verification failure. */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/* Probe: if the same insns load as kprobe, the
			 * original program type was the problem.
			 */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		/* Empty log and right type: likely kernel-version issue. */
		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
/*
 * Load @prog into the kernel.  Without a preprocessor there is exactly
 * one instance; with one (see bpf_program__set_prep()) each instance is
 * generated by the preprocessor callback and loaded separately.  The
 * resulting fds land in prog->instances.fds[].  The insn buffer is
 * always freed on return, success or failure.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	/* Lazily set up the single-instance fd array. */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	/* Preprocessor path: one load per instance. */
	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* Preprocessor may elect to skip an instance entirely. */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* Instructions are no longer needed once loaded (or failed). */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
  1242. static bool bpf_program__is_function_storage(struct bpf_program *prog,
  1243. struct bpf_object *obj)
  1244. {
  1245. return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
  1246. }
  1247. static int
  1248. bpf_object__load_progs(struct bpf_object *obj)
  1249. {
  1250. size_t i;
  1251. int err;
  1252. for (i = 0; i < obj->nr_programs; i++) {
  1253. if (bpf_program__is_function_storage(&obj->programs[i], obj))
  1254. continue;
  1255. err = bpf_program__load(&obj->programs[i],
  1256. obj->license,
  1257. obj->kern_version);
  1258. if (err)
  1259. return err;
  1260. }
  1261. return 0;
  1262. }
  1263. static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
  1264. {
  1265. switch (type) {
  1266. case BPF_PROG_TYPE_SOCKET_FILTER:
  1267. case BPF_PROG_TYPE_SCHED_CLS:
  1268. case BPF_PROG_TYPE_SCHED_ACT:
  1269. case BPF_PROG_TYPE_XDP:
  1270. case BPF_PROG_TYPE_CGROUP_SKB:
  1271. case BPF_PROG_TYPE_CGROUP_SOCK:
  1272. case BPF_PROG_TYPE_LWT_IN:
  1273. case BPF_PROG_TYPE_LWT_OUT:
  1274. case BPF_PROG_TYPE_LWT_XMIT:
  1275. case BPF_PROG_TYPE_LWT_SEG6LOCAL:
  1276. case BPF_PROG_TYPE_SOCK_OPS:
  1277. case BPF_PROG_TYPE_SK_SKB:
  1278. case BPF_PROG_TYPE_CGROUP_DEVICE:
  1279. case BPF_PROG_TYPE_SK_MSG:
  1280. case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
  1281. case BPF_PROG_TYPE_LIRC_MODE2:
  1282. case BPF_PROG_TYPE_SK_REUSEPORT:
  1283. case BPF_PROG_TYPE_FLOW_DISSECTOR:
  1284. return false;
  1285. case BPF_PROG_TYPE_UNSPEC:
  1286. case BPF_PROG_TYPE_KPROBE:
  1287. case BPF_PROG_TYPE_TRACEPOINT:
  1288. case BPF_PROG_TYPE_PERF_EVENT:
  1289. case BPF_PROG_TYPE_RAW_TRACEPOINT:
  1290. default:
  1291. return true;
  1292. }
  1293. }
  1294. static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
  1295. {
  1296. if (needs_kver && obj->kern_version == 0) {
  1297. pr_warning("%s doesn't provide kernel version\n",
  1298. obj->path);
  1299. return -LIBBPF_ERRNO__KVERSION;
  1300. }
  1301. return 0;
  1302. }
/*
 * Open an object from a path or an in-memory buffer: parse the ELF,
 * collect maps, programs and relocations, then release the libelf
 * state.  The CHECK_ERR() stages are ordered — each consumes state
 * produced by the previous one.  Returns a bpf_object or ERR_PTR().
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* ELF data fully digested; drop libelf handles early. */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
  1327. struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
  1328. int flags)
  1329. {
  1330. /* param validation */
  1331. if (!attr->file)
  1332. return NULL;
  1333. pr_debug("loading %s\n", attr->file);
  1334. return __bpf_object__open(attr->file, NULL, 0,
  1335. bpf_prog_type__needs_kver(attr->prog_type),
  1336. flags);
  1337. }
  1338. struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
  1339. {
  1340. return __bpf_object__open_xattr(attr, 0);
  1341. }
  1342. struct bpf_object *bpf_object__open(const char *path)
  1343. {
  1344. struct bpf_object_open_attr attr = {
  1345. .file = path,
  1346. .prog_type = BPF_PROG_TYPE_UNSPEC,
  1347. };
  1348. return bpf_object__open_xattr(&attr);
  1349. }
/*
 * Open an object from an in-memory ELF image.  A synthetic name derived
 * from the buffer address/size is used when @name is NULL.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	/*
	 * NOTE(review): the trailing 'true' is passed as the int 'flags'
	 * parameter (i.e. flags == 1), not as a boolean — confirm that
	 * this flag value is intended.
	 */
	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
  1369. int bpf_object__unload(struct bpf_object *obj)
  1370. {
  1371. size_t i;
  1372. if (!obj)
  1373. return -EINVAL;
  1374. for (i = 0; i < obj->nr_maps; i++)
  1375. zclose(obj->maps[i].fd);
  1376. for (i = 0; i < obj->nr_programs; i++)
  1377. bpf_program__unload(&obj->programs[i]);
  1378. return 0;
  1379. }
/*
 * Load @obj into the kernel: create maps, apply relocations (which need
 * the map fds), then load programs.  On any failure, partially-loaded
 * state is torn down with bpf_object__unload().
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	/* Loading twice would leak/clobber kernel resources. */
	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
  1399. static int check_path(const char *path)
  1400. {
  1401. char *cp, errmsg[STRERR_BUFSIZE];
  1402. struct statfs st_fs;
  1403. char *dname, *dir;
  1404. int err = 0;
  1405. if (path == NULL)
  1406. return -EINVAL;
  1407. dname = strdup(path);
  1408. if (dname == NULL)
  1409. return -ENOMEM;
  1410. dir = dirname(dname);
  1411. if (statfs(dir, &st_fs)) {
  1412. cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
  1413. pr_warning("failed to statfs %s: %s\n", dir, cp);
  1414. err = -errno;
  1415. }
  1416. free(dname);
  1417. if (!err && st_fs.f_type != BPF_FS_MAGIC) {
  1418. pr_warning("specified path %s is not on BPF FS\n", path);
  1419. err = -EINVAL;
  1420. }
  1421. return err;
  1422. }
  1423. int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
  1424. int instance)
  1425. {
  1426. char *cp, errmsg[STRERR_BUFSIZE];
  1427. int err;
  1428. err = check_path(path);
  1429. if (err)
  1430. return err;
  1431. if (prog == NULL) {
  1432. pr_warning("invalid program pointer\n");
  1433. return -EINVAL;
  1434. }
  1435. if (instance < 0 || instance >= prog->instances.nr) {
  1436. pr_warning("invalid prog instance %d of prog %s (max %d)\n",
  1437. instance, prog->section_name, prog->instances.nr);
  1438. return -EINVAL;
  1439. }
  1440. if (bpf_obj_pin(prog->instances.fds[instance], path)) {
  1441. cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
  1442. pr_warning("failed to pin program: %s\n", cp);
  1443. return -errno;
  1444. }
  1445. pr_debug("pinned program '%s'\n", path);
  1446. return 0;
  1447. }
  1448. static int make_dir(const char *path)
  1449. {
  1450. char *cp, errmsg[STRERR_BUFSIZE];
  1451. int err = 0;
  1452. if (mkdir(path, 0700) && errno != EEXIST)
  1453. err = -errno;
  1454. if (err) {
  1455. cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
  1456. pr_warning("failed to mkdir %s: %s\n", path, cp);
  1457. }
  1458. return err;
  1459. }
  1460. int bpf_program__pin(struct bpf_program *prog, const char *path)
  1461. {
  1462. int i, err;
  1463. err = check_path(path);
  1464. if (err)
  1465. return err;
  1466. if (prog == NULL) {
  1467. pr_warning("invalid program pointer\n");
  1468. return -EINVAL;
  1469. }
  1470. if (prog->instances.nr <= 0) {
  1471. pr_warning("no instances of prog %s to pin\n",
  1472. prog->section_name);
  1473. return -EINVAL;
  1474. }
  1475. err = make_dir(path);
  1476. if (err)
  1477. return err;
  1478. for (i = 0; i < prog->instances.nr; i++) {
  1479. char buf[PATH_MAX];
  1480. int len;
  1481. len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
  1482. if (len < 0)
  1483. return -EINVAL;
  1484. else if (len >= PATH_MAX)
  1485. return -ENAMETOOLONG;
  1486. err = bpf_program__pin_instance(prog, buf, i);
  1487. if (err)
  1488. return err;
  1489. }
  1490. return 0;
  1491. }
  1492. int bpf_map__pin(struct bpf_map *map, const char *path)
  1493. {
  1494. char *cp, errmsg[STRERR_BUFSIZE];
  1495. int err;
  1496. err = check_path(path);
  1497. if (err)
  1498. return err;
  1499. if (map == NULL) {
  1500. pr_warning("invalid map pointer\n");
  1501. return -EINVAL;
  1502. }
  1503. if (bpf_obj_pin(map->fd, path)) {
  1504. cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
  1505. pr_warning("failed to pin map: %s\n", cp);
  1506. return -errno;
  1507. }
  1508. pr_debug("pinned map '%s'\n", path);
  1509. return 0;
  1510. }
  1511. int bpf_object__pin(struct bpf_object *obj, const char *path)
  1512. {
  1513. struct bpf_program *prog;
  1514. struct bpf_map *map;
  1515. int err;
  1516. if (!obj)
  1517. return -ENOENT;
  1518. if (!obj->loaded) {
  1519. pr_warning("object not yet loaded; load it first\n");
  1520. return -ENOENT;
  1521. }
  1522. err = make_dir(path);
  1523. if (err)
  1524. return err;
  1525. bpf_map__for_each(map, obj) {
  1526. char buf[PATH_MAX];
  1527. int len;
  1528. len = snprintf(buf, PATH_MAX, "%s/%s", path,
  1529. bpf_map__name(map));
  1530. if (len < 0)
  1531. return -EINVAL;
  1532. else if (len >= PATH_MAX)
  1533. return -ENAMETOOLONG;
  1534. err = bpf_map__pin(map, buf);
  1535. if (err)
  1536. return err;
  1537. }
  1538. bpf_object__for_each_program(prog, obj) {
  1539. char buf[PATH_MAX];
  1540. int len;
  1541. len = snprintf(buf, PATH_MAX, "%s/%s", path,
  1542. prog->section_name);
  1543. if (len < 0)
  1544. return -EINVAL;
  1545. else if (len >= PATH_MAX)
  1546. return -ENAMETOOLONG;
  1547. err = bpf_program__pin(prog, buf);
  1548. if (err)
  1549. return err;
  1550. }
  1551. return 0;
  1552. }
/*
 * Tear down @obj entirely: run private-data destructors, release ELF
 * state and kernel resources, free BTF, maps and programs, and unlink
 * the object from the global list.  Safe to call with NULL.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		/* Run per-map private-data destructors before freeing. */
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* Remove from the global bpf_objects_list before freeing. */
	list_del(&obj->list);
	free(obj);
}
/*
 * Iterate the global list of open objects.  Pass NULL to start from the
 * first object; returns NULL once the end of the list is reached.
 */
struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}
  1596. const char *bpf_object__name(struct bpf_object *obj)
  1597. {
  1598. return obj ? obj->path : ERR_PTR(-EINVAL);
  1599. }
  1600. unsigned int bpf_object__kversion(struct bpf_object *obj)
  1601. {
  1602. return obj ? obj->kern_version : 0;
  1603. }
  1604. int bpf_object__btf_fd(const struct bpf_object *obj)
  1605. {
  1606. return obj->btf ? btf__fd(obj->btf) : -1;
  1607. }
  1608. int bpf_object__set_priv(struct bpf_object *obj, void *priv,
  1609. bpf_object_clear_priv_t clear_priv)
  1610. {
  1611. if (obj->priv && obj->clear_priv)
  1612. obj->clear_priv(obj, obj->priv);
  1613. obj->priv = priv;
  1614. obj->clear_priv = clear_priv;
  1615. return 0;
  1616. }
  1617. void *bpf_object__priv(struct bpf_object *obj)
  1618. {
  1619. return obj ? obj->priv : ERR_PTR(-EINVAL);
  1620. }
  1621. static struct bpf_program *
  1622. __bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
  1623. {
  1624. size_t idx;
  1625. if (!obj->programs)
  1626. return NULL;
  1627. /* First handler */
  1628. if (prev == NULL)
  1629. return &obj->programs[0];
  1630. if (prev->obj != obj) {
  1631. pr_warning("error: program handler doesn't match object\n");
  1632. return NULL;
  1633. }
  1634. idx = (prev - obj->programs) + 1;
  1635. if (idx >= obj->nr_programs)
  1636. return NULL;
  1637. return &obj->programs[idx];
  1638. }
  1639. struct bpf_program *
  1640. bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
  1641. {
  1642. struct bpf_program *prog = prev;
  1643. do {
  1644. prog = __bpf_program__next(prog, obj);
  1645. } while (prog && bpf_program__is_function_storage(prog, obj));
  1646. return prog;
  1647. }
  1648. int bpf_program__set_priv(struct bpf_program *prog, void *priv,
  1649. bpf_program_clear_priv_t clear_priv)
  1650. {
  1651. if (prog->priv && prog->clear_priv)
  1652. prog->clear_priv(prog, prog->priv);
  1653. prog->priv = priv;
  1654. prog->clear_priv = clear_priv;
  1655. return 0;
  1656. }
  1657. void *bpf_program__priv(struct bpf_program *prog)
  1658. {
  1659. return prog ? prog->priv : ERR_PTR(-EINVAL);
  1660. }
  1661. void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
  1662. {
  1663. prog->prog_ifindex = ifindex;
  1664. }
  1665. const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
  1666. {
  1667. const char *title;
  1668. title = prog->section_name;
  1669. if (needs_copy) {
  1670. title = strdup(title);
  1671. if (!title) {
  1672. pr_warning("failed to strdup program title\n");
  1673. return ERR_PTR(-ENOMEM);
  1674. }
  1675. }
  1676. return title;
  1677. }
  1678. int bpf_program__fd(struct bpf_program *prog)
  1679. {
  1680. return bpf_program__nth_fd(prog, 0);
  1681. }
  1682. int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
  1683. bpf_program_prep_t prep)
  1684. {
  1685. int *instances_fds;
  1686. if (nr_instances <= 0 || !prep)
  1687. return -EINVAL;
  1688. if (prog->instances.nr > 0 || prog->instances.fds) {
  1689. pr_warning("Can't set pre-processor after loading\n");
  1690. return -EINVAL;
  1691. }
  1692. instances_fds = malloc(sizeof(int) * nr_instances);
  1693. if (!instances_fds) {
  1694. pr_warning("alloc memory failed for fds\n");
  1695. return -ENOMEM;
  1696. }
  1697. /* fill all fd with -1 */
  1698. memset(instances_fds, -1, sizeof(int) * nr_instances);
  1699. prog->instances.nr = nr_instances;
  1700. prog->instances.fds = instances_fds;
  1701. prog->preprocessor = prep;
  1702. return 0;
  1703. }
  1704. int bpf_program__nth_fd(struct bpf_program *prog, int n)
  1705. {
  1706. int fd;
  1707. if (!prog)
  1708. return -EINVAL;
  1709. if (n >= prog->instances.nr || n < 0) {
  1710. pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
  1711. n, prog->section_name, prog->instances.nr);
  1712. return -EINVAL;
  1713. }
  1714. fd = prog->instances.fds[n];
  1715. if (fd < 0) {
  1716. pr_warning("%dth instance of program '%s' is invalid\n",
  1717. n, prog->section_name);
  1718. return -ENOENT;
  1719. }
  1720. return fd;
  1721. }
  1722. void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
  1723. {
  1724. prog->type = type;
  1725. }
  1726. static bool bpf_program__is_type(struct bpf_program *prog,
  1727. enum bpf_prog_type type)
  1728. {
  1729. return prog ? (prog->type == type) : false;
  1730. }
  1731. #define BPF_PROG_TYPE_FNS(NAME, TYPE) \
  1732. int bpf_program__set_##NAME(struct bpf_program *prog) \
  1733. { \
  1734. if (!prog) \
  1735. return -EINVAL; \
  1736. bpf_program__set_type(prog, TYPE); \
  1737. return 0; \
  1738. } \
  1739. \
  1740. bool bpf_program__is_##NAME(struct bpf_program *prog) \
  1741. { \
  1742. return bpf_program__is_type(prog, TYPE); \
  1743. } \
  1744. BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
  1745. BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
  1746. BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
  1747. BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
  1748. BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
  1749. BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
  1750. BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
  1751. BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
  1752. void bpf_program__set_expected_attach_type(struct bpf_program *prog,
  1753. enum bpf_attach_type type)
  1754. {
  1755. prog->expected_attach_type = type;
  1756. }
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)

/* ELF-section-name prefix -> BPF program/attach type table.
 *
 * NOTE: lookups (libbpf_prog_type_by_name / libbpf_attach_type_by_name)
 * match by prefix with strncmp() over 'len' bytes, so more specific
 * entries (e.g. "sk_skb/stream_parser") must stay ahead of their shorter
 * prefixes (e.g. "sk_skb") — the order below is load-bearing.
 */
static const struct {
	const char *sec;			/* section-name prefix */
	size_t len;				/* bytes of prefix compared */
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int is_attachable;			/* valid for attach-by-name? */
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
		      BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
		      BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
		      BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
		       BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
		      BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
		      BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
		      BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
		      BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
		      BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
		      BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
		      BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
		       BPF_CGROUP_UDP6_SENDMSG),
};

#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
  1836. int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
  1837. enum bpf_attach_type *expected_attach_type)
  1838. {
  1839. int i;
  1840. if (!name)
  1841. return -EINVAL;
  1842. for (i = 0; i < ARRAY_SIZE(section_names); i++) {
  1843. if (strncmp(name, section_names[i].sec, section_names[i].len))
  1844. continue;
  1845. *prog_type = section_names[i].prog_type;
  1846. *expected_attach_type = section_names[i].expected_attach_type;
  1847. return 0;
  1848. }
  1849. return -EINVAL;
  1850. }
  1851. int libbpf_attach_type_by_name(const char *name,
  1852. enum bpf_attach_type *attach_type)
  1853. {
  1854. int i;
  1855. if (!name)
  1856. return -EINVAL;
  1857. for (i = 0; i < ARRAY_SIZE(section_names); i++) {
  1858. if (strncmp(name, section_names[i].sec, section_names[i].len))
  1859. continue;
  1860. if (!section_names[i].is_attachable)
  1861. return -EINVAL;
  1862. *attach_type = section_names[i].attach_type;
  1863. return 0;
  1864. }
  1865. return -EINVAL;
  1866. }
  1867. static int
  1868. bpf_program__identify_section(struct bpf_program *prog,
  1869. enum bpf_prog_type *prog_type,
  1870. enum bpf_attach_type *expected_attach_type)
  1871. {
  1872. return libbpf_prog_type_by_name(prog->section_name, prog_type,
  1873. expected_attach_type);
  1874. }
  1875. int bpf_map__fd(struct bpf_map *map)
  1876. {
  1877. return map ? map->fd : -EINVAL;
  1878. }
  1879. const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
  1880. {
  1881. return map ? &map->def : ERR_PTR(-EINVAL);
  1882. }
  1883. const char *bpf_map__name(struct bpf_map *map)
  1884. {
  1885. return map ? map->name : NULL;
  1886. }
  1887. __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
  1888. {
  1889. return map ? map->btf_key_type_id : 0;
  1890. }
  1891. __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
  1892. {
  1893. return map ? map->btf_value_type_id : 0;
  1894. }
  1895. int bpf_map__set_priv(struct bpf_map *map, void *priv,
  1896. bpf_map_clear_priv_t clear_priv)
  1897. {
  1898. if (!map)
  1899. return -EINVAL;
  1900. if (map->priv) {
  1901. if (map->clear_priv)
  1902. map->clear_priv(map, map->priv);
  1903. }
  1904. map->priv = priv;
  1905. map->clear_priv = clear_priv;
  1906. return 0;
  1907. }
  1908. void *bpf_map__priv(struct bpf_map *map)
  1909. {
  1910. return map ? map->priv : ERR_PTR(-EINVAL);
  1911. }
  1912. bool bpf_map__is_offload_neutral(struct bpf_map *map)
  1913. {
  1914. return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
  1915. }
  1916. void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
  1917. {
  1918. map->map_ifindex = ifindex;
  1919. }
  1920. struct bpf_map *
  1921. bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
  1922. {
  1923. size_t idx;
  1924. struct bpf_map *s, *e;
  1925. if (!obj || !obj->maps)
  1926. return NULL;
  1927. s = obj->maps;
  1928. e = obj->maps + obj->nr_maps;
  1929. if (prev == NULL)
  1930. return s;
  1931. if ((prev < s) || (prev >= e)) {
  1932. pr_warning("error in %s: map handler doesn't belong to object\n",
  1933. __func__);
  1934. return NULL;
  1935. }
  1936. idx = (prev - obj->maps) + 1;
  1937. if (idx >= obj->nr_maps)
  1938. return NULL;
  1939. return &obj->maps[idx];
  1940. }
  1941. struct bpf_map *
  1942. bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
  1943. {
  1944. struct bpf_map *pos;
  1945. bpf_map__for_each(pos, obj) {
  1946. if (pos->name && !strcmp(pos->name, name))
  1947. return pos;
  1948. }
  1949. return NULL;
  1950. }
  1951. struct bpf_map *
  1952. bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
  1953. {
  1954. int i;
  1955. for (i = 0; i < obj->nr_maps; i++) {
  1956. if (obj->maps[i].offset == offset)
  1957. return &obj->maps[i];
  1958. }
  1959. return ERR_PTR(-ENOENT);
  1960. }
  1961. long libbpf_get_error(const void *ptr)
  1962. {
  1963. if (IS_ERR(ptr))
  1964. return PTR_ERR(ptr);
  1965. return 0;
  1966. }
  1967. int bpf_prog_load(const char *file, enum bpf_prog_type type,
  1968. struct bpf_object **pobj, int *prog_fd)
  1969. {
  1970. struct bpf_prog_load_attr attr;
  1971. memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
  1972. attr.file = file;
  1973. attr.prog_type = type;
  1974. attr.expected_attach_type = 0;
  1975. return bpf_prog_load_xattr(&attr, pobj, prog_fd);
  1976. }
  1977. int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
  1978. struct bpf_object **pobj, int *prog_fd)
  1979. {
  1980. struct bpf_object_open_attr open_attr = {
  1981. .file = attr->file,
  1982. .prog_type = attr->prog_type,
  1983. };
  1984. struct bpf_program *prog, *first_prog = NULL;
  1985. enum bpf_attach_type expected_attach_type;
  1986. enum bpf_prog_type prog_type;
  1987. struct bpf_object *obj;
  1988. struct bpf_map *map;
  1989. int err;
  1990. if (!attr)
  1991. return -EINVAL;
  1992. if (!attr->file)
  1993. return -EINVAL;
  1994. obj = bpf_object__open_xattr(&open_attr);
  1995. if (IS_ERR_OR_NULL(obj))
  1996. return -ENOENT;
  1997. bpf_object__for_each_program(prog, obj) {
  1998. /*
  1999. * If type is not specified, try to guess it based on
  2000. * section name.
  2001. */
  2002. prog_type = attr->prog_type;
  2003. prog->prog_ifindex = attr->ifindex;
  2004. expected_attach_type = attr->expected_attach_type;
  2005. if (prog_type == BPF_PROG_TYPE_UNSPEC) {
  2006. err = bpf_program__identify_section(prog, &prog_type,
  2007. &expected_attach_type);
  2008. if (err < 0) {
  2009. pr_warning("failed to guess program type based on section name %s\n",
  2010. prog->section_name);
  2011. bpf_object__close(obj);
  2012. return -EINVAL;
  2013. }
  2014. }
  2015. bpf_program__set_type(prog, prog_type);
  2016. bpf_program__set_expected_attach_type(prog,
  2017. expected_attach_type);
  2018. if (!first_prog)
  2019. first_prog = prog;
  2020. }
  2021. bpf_map__for_each(map, obj) {
  2022. if (!bpf_map__is_offload_neutral(map))
  2023. map->map_ifindex = attr->ifindex;
  2024. }
  2025. if (!first_prog) {
  2026. pr_warning("object file doesn't contain bpf program\n");
  2027. bpf_object__close(obj);
  2028. return -ENOENT;
  2029. }
  2030. err = bpf_object__load(obj);
  2031. if (err) {
  2032. bpf_object__close(obj);
  2033. return -EINVAL;
  2034. }
  2035. *pobj = obj;
  2036. *prog_fd = bpf_program__fd(first_prog);
  2037. return 0;
  2038. }
/*
 * Drain all pending records from one perf ring-buffer mapping.
 *
 * @mmap_mem:   base of the perf_event mmap area (header page followed by
 *              the data pages)
 * @mmap_size:  size of the data area in bytes; used as a wrap-around mask
 *              below, so it is assumed to be a power of two (not checked)
 * @page_size:  system page size — the data area starts one page past header
 * @copy_mem/@copy_size: caller-owned scratch buffer, grown on demand to
 *              linearize records that wrap past the end of the data area;
 *              caller frees *copy_mem eventually
 * @fn:         callback invoked per record; any value other than
 *              LIBBPF_PERF_EVENT_CONT stops the loop
 * @private_data: opaque cookie forwarded to @fn
 *
 * Returns the last callback result, or LIBBPF_PERF_EVENT_ERROR on OOM.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	/* acquire-style read of the producer position (pairs with the
	 * kernel's store to data_head) */
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		/* offsets increase monotonically; mask wraps them into
		 * the data area */
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* Record straddles the end of the data area: copy the two
		 * pieces into the scratch buffer so @fn sees a contiguous
		 * record. */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			/* grow scratch buffer if too small */
			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* release-style store: tell the kernel how far we consumed, even
	 * when stopping early */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}