libbpf.c

// SPDX-License-Identifier: LGPL-2.1
/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <perf-sys.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <libelf.h>
#include <gelf.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

#define __printf(a, b)	__attribute__((format(printf, a, b)))

__printf(1, 2)
static int __base_pr(const char *format, ...)
{
	va_list args;
	int err;

	va_start(args, format);
	err = vfprintf(stderr, format, args);
	va_end(args);
	return err;
}

static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
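
/*
 * Install caller-provided printers for the three message levels above.
 * Passing NULL for a level silences it (the __pr() wrapper skips NULL
 * hooks); by default warnings and info go to stderr and debug is off.
 */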
void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}

#define STRERR_BUFSIZE  128

#define ERRNO_OFFSET(e)		((e) - __LIBBPF_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(LIBBPF_ERRNO__##c)
#define NR_ERRNO	(__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)

static const char *libbpf_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(LIBELF)]	= "Something wrong in libelf",
	[ERRCODE_OFFSET(FORMAT)]	= "BPF object format invalid",
	[ERRCODE_OFFSET(KVERSION)]	= "'version' section incorrect or lost",
	[ERRCODE_OFFSET(ENDIAN)]	= "Endian mismatch",
	[ERRCODE_OFFSET(INTERNAL)]	= "Internal error in libbpf",
	[ERRCODE_OFFSET(RELOC)]		= "Relocation failed",
	[ERRCODE_OFFSET(VERIFY)]	= "Kernel verifier blocks program loading",
	[ERRCODE_OFFSET(PROG2BIG)]	= "Program too big",
	[ERRCODE_OFFSET(KVER)]		= "Incorrect kernel version",
	[ERRCODE_OFFSET(PROGTYPE)]	= "Kernel doesn't support this program type",
	[ERRCODE_OFFSET(WRNGPID)]	= "Wrong pid in netlink message",
	[ERRCODE_OFFSET(INVSEQ)]	= "Invalid netlink sequence",
};
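
/*
 * Translate an error code (positive or negative) into a message in 'buf'.
 * Codes below __LIBBPF_ERRNO__START are treated as plain errnos via
 * strerror_r(); codes in the libbpf-specific range use the table above.
 */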
int libbpf_strerror(int err, char *buf, size_t size)
{
	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err < __LIBBPF_ERRNO__START) {
		int ret;

		ret = strerror_r(err, buf, size);
		buf[size - 1] = '\0';
		return ret;
	}

	if (err < __LIBBPF_ERRNO__END) {
		const char *msg;

		msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	snprintf(buf, size, "Unknown libbpf error %d", err);
	buf[size - 1] = '\0';
	return -1;
}

#define CHECK_ERR(action, err, out) do {	\
	err = action;				\
	if (err)				\
		goto out;			\
} while (0)

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;
	int prog_ifindex;
	char *section_name;
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	struct reloc_desc {
		enum {
			RELO_LD64,
			RELO_CALL,
		} type;
		int insn_idx;
		union {
			int map_idx;
			int text_off;
		};
	} *reloc_desc;
	int nr_reloc;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
};

struct bpf_map {
	int fd;
	char *name;
	size_t offset;
	int map_ifindex;
	struct bpf_map_def def;
	uint32_t btf_key_type_id;
	uint32_t btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];
	u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;

	/*
	 * Information used while doing ELF-related work. Only valid if
	 * fd is valid.
	 */
	struct {
		int fd;
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		int maps_shndx;
		int text_shndx;
	} efile;
	/*
	 * Every loaded bpf_object is linked into a list that is hidden
	 * from the caller. The bpf_object__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	char path[];
};
#define obj_elf_valid(o)	((o)->efile.elf)

static void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}

static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
		  struct bpf_program *prog)
{
	if (size < sizeof(struct bpf_insn)) {
		pr_warning("corrupted section '%s'\n", section_name);
		return -EINVAL;
	}

	bzero(prog, sizeof(*prog));

	prog->section_name = strdup(section_name);
	if (!prog->section_name) {
		pr_warning("failed to alloc name for prog under section(%d) %s\n",
			   idx, section_name);
		goto errout;
	}

	prog->insns = malloc(size);
	if (!prog->insns) {
		pr_warning("failed to alloc insns for prog under section %s\n",
			   section_name);
		goto errout;
	}
	prog->insns_cnt = size / sizeof(struct bpf_insn);
	memcpy(prog->insns, data,
	       prog->insns_cnt * sizeof(struct bpf_insn));
	prog->idx = idx;
	prog->instances.fds = NULL;
	prog->instances.nr = -1;
	prog->type = BPF_PROG_TYPE_KPROBE;

	return 0;
errout:
	bpf_program__exit(prog);
	return -ENOMEM;
}
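
/*
 * Grow obj->programs by one via realloc() and copy in a bpf_program
 * initialized from the given section data; on allocation failure the
 * original array is left untouched.
 */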
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
	if (!progs) {
		/*
		 * In this case the original obj->programs is still
		 * valid, so no special treatment is needed in
		 * bpf_object__close().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	progs[nr_progs] = prog;
	return 0;
}
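
/*
 * Resolve each program's name from the ELF symbol table: pick the
 * STB_GLOBAL symbol defined in that program's section, falling back to
 * ".text" for the text section itself.
 */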
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. If not, we would have to duplicate the
	 * buffer to avoid the user freeing it before ELF processing
	 * finishes.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			pr_warning("failed to open %s: %s\n", obj->path,
				   strerror(errno));
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
					   LIBBPF_ELF_C_READ_MMAP,
					   NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
		pr_warning("%s is not an eBPF object file\n",
			   obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int
bpf_object__check_endianness(struct bpf_object *obj)
{
	static unsigned int const endian = 1;

	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		/* We are big endian, BPF obj is little endian. */
		if (*(unsigned char const *)&endian != 1)
			goto mismatch;
		break;

	case ELFDATA2MSB:
		/* We are little endian, BPF obj is big endian. */
		if (*(unsigned char const *)&endian != 0)
			goto mismatch;
		break;
	default:
		return -LIBBPF_ERRNO__ENDIAN;
	}

	return 0;

mismatch:
	pr_warning("Error: endianness mismatch.\n");
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj,
			 void *data, size_t size)
{
	memcpy(obj->license, data,
	       min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj,
			  void *data, size_t size)
{
	u32 kver;

	if (size != sizeof(kver)) {
		pr_warning("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path,
		 obj->kern_version);
	return 0;
}

static int compare_bpf_map(const void *_a, const void *_b)
{
	const struct bpf_map *a = _a;
	const struct bpf_map *b = _b;

	return a->offset - b->offset;
}
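
/*
 * Parse the "maps" section: count the map symbols, assume all map
 * definitions have the same size, and copy each one into obj->maps.
 * Definitions larger than the known struct bpf_map_def are accepted
 * only if the extra bytes are zero. Maps are sorted by section offset.
 */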
static int
bpf_object__init_maps(struct bpf_object *obj)
{
	int i, map_idx, map_def_sz, nr_maps = 0;
	Elf_Scn *scn;
	Elf_Data *data;
	Elf_Data *symbols = obj->efile.symbols;

	if (obj->efile.maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	/* Alloc obj->maps and fill nr_maps. */
	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
		 nr_maps, data->d_size);

	if (!nr_maps)
		return 0;

	/* Assume equally sized map definitions */
	map_def_sz = data->d_size / nr_maps;
	if (!data->d_size || (data->d_size % nr_maps) != 0) {
		pr_warning("unable to determine map definition size "
			   "section %s, %d maps in %zd bytes\n",
			   obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	/*
	 * Fill all fds with -1 so we won't close an incorrect fd
	 * (fd=0 is stdin) on failure (zclose won't close a
	 * negative fd).
	 */
	for (i = 0; i < nr_maps; i++)
		obj->maps[i].fd = -1;

	/*
	 * Fill obj->maps using data in "maps" section.
	 */
	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		obj->maps[map_idx].offset = sym.st_value;
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warning("maps section in %s: \"%s\" "
						   "has unrecognized, non-zero "
						   "options\n",
						   obj->path, map_name);
					return -EINVAL;
				}
			}
			memcpy(&obj->maps[map_idx].def, def,
			       sizeof(struct bpf_map_def));
		}
		map_idx++;
	}

	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
	return 0;
}

static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
	Elf_Scn *scn;
	GElf_Shdr sh;

	scn = elf_getscn(obj->efile.elf, idx);
	if (!scn)
		return false;

	if (gelf_getshdr(scn, &sh) != &sh)
		return false;

	if (sh.sh_flags & SHF_EXECINSTR)
		return true;

	return false;
}
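
/*
 * Walk every ELF section once and dispatch on its name/type:
 * "license" and "version" fill object fields, "maps" and ".text"
 * record section indices, ".BTF" is parsed (failure is non-fatal),
 * SYMTAB is remembered, executable PROGBITS sections become programs,
 * and SHT_REL sections are queued for relocation processing later.
 */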
static int bpf_object__elf_collect(struct bpf_object *obj)
{
	Elf *elf = obj->efile.elf;
	GElf_Ehdr *ep = &obj->efile.ehdr;
	Elf_Scn *scn = NULL;
	int idx = 0, err = 0;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n",
			   obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		char *name;
		GElf_Shdr sh;
		Elf_Data *data;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}

		data = elf_getdata(scn, 0);
		if (!data) {
			pr_warning("failed to get section(%d) data from %s(%s)\n",
				   idx, name, obj->path);
			err = -LIBBPF_ERRNO__FORMAT;
			goto out;
		}
		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
			 (int)sh.sh_type);

		if (strcmp(name, "license") == 0)
			err = bpf_object__init_license(obj,
						       data->d_buf,
						       data->d_size);
		else if (strcmp(name, "version") == 0)
			err = bpf_object__init_kversion(obj,
							data->d_buf,
							data->d_size);
		else if (strcmp(name, "maps") == 0)
			obj->efile.maps_shndx = idx;
		else if (strcmp(name, BTF_ELF_SEC) == 0) {
			obj->btf = btf__new(data->d_buf, data->d_size,
					    __pr_debug);
			if (IS_ERR(obj->btf)) {
				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
					   BTF_ELF_SEC, PTR_ERR(obj->btf));
				obj->btf = NULL;
			}
		} else if (sh.sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warning("bpf: multiple SYMTAB in %s\n",
					   obj->path);
				err = -LIBBPF_ERRNO__FORMAT;
			} else {
				obj->efile.symbols = data;
				obj->efile.strtabidx = sh.sh_link;
			}
		} else if ((sh.sh_type == SHT_PROGBITS) &&
			   (sh.sh_flags & SHF_EXECINSTR) &&
			   (data->d_size > 0)) {
			if (strcmp(name, ".text") == 0)
				obj->efile.text_shndx = idx;
			err = bpf_object__add_program(obj, data->d_buf,
						      data->d_size, name, idx);
			if (err) {
				char errmsg[STRERR_BUFSIZE];

				strerror_r(-err, errmsg, sizeof(errmsg));
				pr_warning("failed to alloc program %s (%s): %s",
					   name, obj->path, errmsg);
			}
		} else if (sh.sh_type == SHT_REL) {
			void *reloc = obj->efile.reloc;
			int nr_reloc = obj->efile.nr_reloc + 1;
			int sec = sh.sh_info; /* points to other section */

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, sec)) {
				pr_debug("skip relo %s(%d) for section(%d)\n",
					 name, idx, sec);
				continue;
			}

			reloc = realloc(reloc,
					sizeof(*obj->efile.reloc) * nr_reloc);
			if (!reloc) {
				pr_warning("realloc failed\n");
				err = -ENOMEM;
			} else {
				int n = nr_reloc - 1;

				obj->efile.reloc = reloc;
				obj->efile.nr_reloc = nr_reloc;

				obj->efile.reloc[n].shdr = sh;
				obj->efile.reloc[n].data = data;
			}
		} else {
			pr_debug("skip section(%d) %s\n", idx, name);
		}

		if (err)
			goto out;
	}

	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
		pr_warning("Corrupted ELF file: index of strtab invalid\n");
		return -LIBBPF_ERRNO__FORMAT;
	}
	if (obj->efile.maps_shndx >= 0) {
		err = bpf_object__init_maps(obj);
		if (err)
			goto out;
	}
	err = bpf_object__init_prog_names(obj);
out:
	return err;
}

static struct bpf_program *
bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
{
	struct bpf_program *prog;
	size_t i;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		if (prog->idx == idx)
			return prog;
	}
	return NULL;
}
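
/*
 * Record one relocation descriptor per ELF relocation entry for this
 * program: a BPF_PSEUDO_CALL instruction becomes RELO_CALL (offset
 * into .text), while an ld_imm64 referencing the maps section becomes
 * RELO_LD64 (index into obj->maps, matched by symbol value/offset).
 */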
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d larger than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
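
/*
 * Look up BTF type ids for a map by the "<map_name>_key" and
 * "<map_name>_value" naming convention, cross-check the BTF type sizes
 * against key_size/value_size from the map definition, then record the
 * ids on the map.
 */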
static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
{
	struct bpf_map_def *def = &map->def;
	const size_t max_name = 256;
	int64_t key_size, value_size;
	int32_t key_id, value_id;
	char name[max_name];

	/* Find key type by name from BTF */
	if (snprintf(name, max_name, "%s_key", map->name) == max_name) {
		pr_warning("map:%s length of BTF key_type:%s_key is too long\n",
			   map->name, map->name);
		return -EINVAL;
	}

	key_id = btf__find_by_name(btf, name);
	if (key_id < 0) {
		pr_debug("map:%s key_type:%s cannot be found in BTF\n",
			 map->name, name);
		return key_id;
	}

	key_size = btf__resolve_size(btf, key_id);
	if (key_size < 0) {
		pr_warning("map:%s key_type:%s cannot get the BTF type_size\n",
			   map->name, name);
		return key_size;
	}

	if (def->key_size != key_size) {
		pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n",
			   map->name, name, (unsigned int)key_size, def->key_size);
		return -EINVAL;
	}

	/* Find value type from BTF */
	if (snprintf(name, max_name, "%s_value", map->name) == max_name) {
		pr_warning("map:%s length of BTF value_type:%s_value is too long\n",
			   map->name, map->name);
		return -EINVAL;
	}

	value_id = btf__find_by_name(btf, name);
	if (value_id < 0) {
		pr_debug("map:%s value_type:%s cannot be found in BTF\n",
			 map->name, name);
		return value_id;
	}

	value_size = btf__resolve_size(btf, value_id);
	if (value_size < 0) {
		pr_warning("map:%s value_type:%s cannot get the BTF type_size\n",
			   map->name, name);
		return value_size;
	}

	if (def->value_size != value_size) {
		pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n",
			   map->name, name, (unsigned int)value_size, def->value_size);
		return -EINVAL;
	}

	map->btf_key_type_id = key_id;
	map->btf_value_type_id = value_id;

	return 0;
}

static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_create_map_attr create_attr = {};
	unsigned int i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];
		struct bpf_map_def *def = &map->def;
		int *pfd = &map->fd;

		create_attr.name = map->name;
		create_attr.map_ifindex = map->map_ifindex;
		create_attr.map_type = def->type;
		create_attr.map_flags = def->map_flags;
		create_attr.key_size = def->key_size;
		create_attr.value_size = def->value_size;
		create_attr.max_entries = def->max_entries;
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;

		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
			create_attr.btf_fd = btf__fd(obj->btf);
			create_attr.btf_key_type_id = map->btf_key_type_id;
			create_attr.btf_value_type_id = map->btf_value_type_id;
		}

		*pfd = bpf_create_map_xattr(&create_attr);
		if (*pfd < 0 && create_attr.btf_key_type_id) {
			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
				   map->name, strerror(errno), errno);
			create_attr.btf_fd = 0;
			create_attr.btf_key_type_id = 0;
			create_attr.btf_value_type_id = 0;
			map->btf_key_type_id = 0;
			map->btf_value_type_id = 0;
			*pfd = bpf_create_map_xattr(&create_attr);
		}

		if (*pfd < 0) {
			size_t j;

			err = *pfd;
			pr_warning("failed to create map (name: '%s'): %s\n",
				   map->name,
				   strerror(errno));
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}
		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
	}

	return 0;
}
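
/*
 * Resolve a RELO_CALL: on first use, append the whole .text program to
 * this program's instruction array (recording the boundary in
 * main_prog_cnt), then rewrite the call instruction's immediate to the
 * relative offset into the appended text.
 */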
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}

static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog || !prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64) {
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			if (insn_idx >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}
			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else {
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}

static int
bpf_object__relocate(struct bpf_object *obj)
{
	struct bpf_program *prog;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];

		err = bpf_program__relocate(prog, obj);
		if (err) {
			pr_warning("failed to relocate '%s'\n",
				   prog->section_name);
			return err;
		}
	}
	return 0;
}

static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		int idx = shdr->sh_info;
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog,
						 shdr, data,
						 obj);
		if (err)
			return err;
	}
	return 0;
}
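
/*
 * Wrapper around bpf_load_program_xattr(): on success store the new
 * program fd in *pfd; on failure classify the error (verifier
 * rejection, program too large, unsupported program type, or kernel
 * version mismatch) into a LIBBPF_ERRNO__* code and dump the verifier
 * log when one is available.
 */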
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, u32 kern_version, int *pfd, int prog_ifindex)
{
	struct bpf_load_program_attr load_attr;
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog_ifindex;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
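
/*
 * Load every instance of a program. Without a preprocessor there is
 * exactly one instance; with one, the preprocessor may rewrite or skip
 * each instance before it is loaded. The instruction array is freed
 * once loading is done.
 */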
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}

static bool bpf_program__is_function_storage(struct bpf_program *prog,
					     struct bpf_object *obj)
{
	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
}

static int
bpf_object__load_progs(struct bpf_object *obj)
{
	size_t i;
	int err;

	for (i = 0; i < obj->nr_programs; i++) {
		if (bpf_program__is_function_storage(&obj->programs[i], obj))
			continue;
		err = bpf_program__load(&obj->programs[i],
					obj->license,
					obj->kern_version);
		if (err)
			return err;
	}
	return 0;
}

static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
		return false;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	default:
		return true;
	}
}

static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
{
	if (needs_kver && obj->kern_version == 0) {
		pr_warning("%s doesn't provide kernel version\n",
			   obj->path);
		return -LIBBPF_ERRNO__KVERSION;
	}
	return 0;
}

static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
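
/*
 * A typical caller flow, shown only as a sketch (error handling
 * abbreviated; exact usage is up to the application):
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o");
 *	int err;
 *
 *	if (!obj || IS_ERR(obj))
 *		return;				// open failed
 *	err = bpf_object__load(obj);		// maps + relocation + load
 *	if (err) {
 *		char msg[STRERR_BUFSIZE];
 *
 *		libbpf_strerror(err, msg, sizeof(msg));
 *	}
 *	// ... use programs and maps, then:
 *	bpf_object__close(obj);
 */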
struct bpf_object *bpf_object__open(const char *path)
{
	/* param validation */
	if (!path)
		return NULL;

	pr_debug("loading %s\n", path);

	return __bpf_object__open(path, NULL, 0, true);
}

struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
}

int bpf_object__unload(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return -EINVAL;

	for (i = 0; i < obj->nr_maps; i++)
		zclose(obj->maps[i].fd);

	for (i = 0; i < obj->nr_programs; i++)
		bpf_program__unload(&obj->programs[i]);

	return 0;
}

int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
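
/*
 * The pinning helpers below require the target path to live on a
 * mounted BPF filesystem; check_path() verifies the parent directory's
 * filesystem magic before any bpf_obj_pin() call is made.
 */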
  1364. static int check_path(const char *path)
  1365. {
  1366. struct statfs st_fs;
  1367. char *dname, *dir;
  1368. int err = 0;
  1369. if (path == NULL)
  1370. return -EINVAL;
  1371. dname = strdup(path);
  1372. if (dname == NULL)
  1373. return -ENOMEM;
  1374. dir = dirname(dname);
  1375. if (statfs(dir, &st_fs)) {
  1376. pr_warning("failed to statfs %s: %s\n", dir, strerror(errno));
  1377. err = -errno;
  1378. }
  1379. free(dname);
  1380. if (!err && st_fs.f_type != BPF_FS_MAGIC) {
  1381. pr_warning("specified path %s is not on BPF FS\n", path);
  1382. err = -EINVAL;
  1383. }
  1384. return err;
  1385. }
  1386. int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
  1387. int instance)
  1388. {
  1389. int err;
  1390. err = check_path(path);
  1391. if (err)
  1392. return err;
  1393. if (prog == NULL) {
  1394. pr_warning("invalid program pointer\n");
  1395. return -EINVAL;
  1396. }
  1397. if (instance < 0 || instance >= prog->instances.nr) {
  1398. pr_warning("invalid prog instance %d of prog %s (max %d)\n",
  1399. instance, prog->section_name, prog->instances.nr);
  1400. return -EINVAL;
  1401. }
  1402. if (bpf_obj_pin(prog->instances.fds[instance], path)) {
  1403. pr_warning("failed to pin program: %s\n", strerror(errno));
  1404. return -errno;
  1405. }
  1406. pr_debug("pinned program '%s'\n", path);
  1407. return 0;
  1408. }
  1409. static int make_dir(const char *path)
  1410. {
  1411. int err = 0;
  1412. if (mkdir(path, 0700) && errno != EEXIST)
  1413. err = -errno;
  1414. if (err)
  1415. pr_warning("failed to mkdir %s: %s\n", path, strerror(-err));
  1416. return err;
  1417. }
  1418. int bpf_program__pin(struct bpf_program *prog, const char *path)
  1419. {
  1420. int i, err;
  1421. err = check_path(path);
  1422. if (err)
  1423. return err;
  1424. if (prog == NULL) {
  1425. pr_warning("invalid program pointer\n");
  1426. return -EINVAL;
  1427. }
  1428. if (prog->instances.nr <= 0) {
  1429. pr_warning("no instances of prog %s to pin\n",
  1430. prog->section_name);
  1431. return -EINVAL;
  1432. }
  1433. err = make_dir(path);
  1434. if (err)
  1435. return err;
  1436. for (i = 0; i < prog->instances.nr; i++) {
  1437. char buf[PATH_MAX];
  1438. int len;
  1439. len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
  1440. if (len < 0)
  1441. return -EINVAL;
  1442. else if (len >= PATH_MAX)
  1443. return -ENAMETOOLONG;
  1444. err = bpf_program__pin_instance(prog, buf, i);
  1445. if (err)
  1446. return err;
  1447. }
  1448. return 0;
  1449. }
int bpf_map__pin(struct bpf_map *map, const char *path)
{
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (map == NULL) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (bpf_obj_pin(map->fd, path)) {
		pr_warning("failed to pin map: %s\n", strerror(errno));
		return -errno;
	}

	pr_debug("pinned map '%s'\n", path);
	return 0;
}
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_map__for_each(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_map__pin(map, buf);
		if (err)
			return err;
	}

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->section_name);
		if (len < 0)
			return -EINVAL;
		else if (len >= PATH_MAX)
			return -ENAMETOOLONG;

		err = bpf_program__pin(prog, buf);
		if (err)
			return err;
	}

	return 0;
}
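
/*
 * Usage sketch (illustrative only): pinning a loaded object under the BPF
 * filesystem so its maps and programs outlive the loading process.  The
 * mount point below is the conventional bpffs location, not something this
 * function enforces beyond the BPF_FS_MAGIC check in check_path().
 *
 *	if (bpf_object__load(obj))
 *		return -1;
 *	if (bpf_object__pin(obj, "/sys/fs/bpf/my_obj"))
 *		return -1;
 *	// maps land in /sys/fs/bpf/my_obj/<map name>,
 *	// program instances in /sys/fs/bpf/my_obj/<section name>/<instance>
 */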
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	list_del(&obj->list);
	free(obj);
}
struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* An empty list is detected here, so no check is needed on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}
const char *bpf_object__name(struct bpf_object *obj)
{
	return obj ? obj->path : ERR_PTR(-EINVAL);
}

unsigned int bpf_object__kversion(struct bpf_object *obj)
{
	return obj ? obj->kern_version : 0;
}

int bpf_object__btf_fd(const struct bpf_object *obj)
{
	return obj->btf ? btf__fd(obj->btf) : -1;
}

int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}

void *bpf_object__priv(struct bpf_object *obj)
{
	return obj ? obj->priv : ERR_PTR(-EINVAL);
}
static struct bpf_program *
__bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	size_t idx;

	if (!obj->programs)
		return NULL;
	/* First handler */
	if (prev == NULL)
		return &obj->programs[0];

	if (prev->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	idx = (prev - obj->programs) + 1;
	if (idx >= obj->nr_programs)
		return NULL;
	return &obj->programs[idx];
}

struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	struct bpf_program *prog = prev;

	do {
		prog = __bpf_program__next(prog, obj);
	} while (prog && bpf_program__is_function_storage(prog, obj));

	return prog;
}
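
/*
 * Usage sketch (illustrative only): walking every program in an object.
 * The bpf_object__for_each_program() macro in libbpf.h wraps
 * bpf_program__next(); the open-coded loop below is equivalent.
 *
 *	struct bpf_program *prog = NULL;
 *
 *	while ((prog = bpf_program__next(prog, obj)) != NULL)
 *		printf("section: %s\n", bpf_program__title(prog, false));
 */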
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}

void *bpf_program__priv(struct bpf_program *prog)
{
	return prog ? prog->priv : ERR_PTR(-EINVAL);
}

void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}

const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->section_name;
	if (needs_copy) {
		title = strdup(title);
		if (!title) {
			pr_warning("failed to strdup program title\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	return title;
}

int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return -EINVAL;

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warning("Can't set pre-processor after loading\n");
		return -EINVAL;
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warning("failed to allocate memory for instance fds\n");
		return -ENOMEM;
	}

	/* fill all fds with -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}

int bpf_program__nth_fd(struct bpf_program *prog, int n)
{
	int fd;

	if (n >= prog->instances.nr || n < 0) {
		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
			   n, prog->section_name, prog->instances.nr);
		return -EINVAL;
	}

	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warning("%dth instance of program '%s' is invalid\n",
			   n, prog->section_name);
		return -ENOENT;
	}

	return fd;
}
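
/*
 * Usage sketch (illustrative only): a pre-processor callback registered with
 * bpf_program__set_prep().  It is called once per instance before loading
 * and may rewrite the instruction array; the bpf_program_prep_t signature
 * and the struct bpf_prog_prep_result field names are assumed to match
 * libbpf.h.
 *
 *	static int patch_instance(struct bpf_program *prog, int n,
 *				  struct bpf_insn *insns, int insns_cnt,
 *				  struct bpf_prog_prep_result *res)
 *	{
 *		// Load the original instructions unchanged for every
 *		// instance; a real callback could patch a per-instance
 *		// constant here before handing the array back.
 *		res->new_insn_ptr = insns;
 *		res->new_insn_cnt = insns_cnt;
 *		return 0;
 *	}
 *
 *	bpf_program__set_prep(prog, 4, patch_instance);
 */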
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}

static bool bpf_program__is_type(struct bpf_program *prog,
				 enum bpf_prog_type type)
{
	return prog ? (prog->type == type) : false;
}

#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);

void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
#define BPF_PROG_SEC_FULL(string, ptype, atype) \
	{ string, sizeof(string) - 1, ptype, atype }

#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_FULL(string, ptype, 0)

#define BPF_S_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK, ptype)

#define BPF_SA_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ptype)

static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",		BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",	BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",	BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",		BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",	BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",	BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp",		BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",	BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("cgroup/skb",	BPF_PROG_TYPE_CGROUP_SKB),
	BPF_PROG_SEC("cgroup/sock",	BPF_PROG_TYPE_CGROUP_SOCK),
	BPF_PROG_SEC("cgroup/dev",	BPF_PROG_TYPE_CGROUP_DEVICE),
	BPF_PROG_SEC("lwt_in",		BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",		BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",	BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",	BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_PROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS),
	BPF_PROG_SEC("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_PROG_SEC("sk_msg",		BPF_PROG_TYPE_SK_MSG),
	BPF_PROG_SEC("lirc_mode2",	BPF_PROG_TYPE_LIRC_MODE2),
	BPF_SA_PROG_SEC("cgroup/bind4",	BPF_CGROUP_INET4_BIND),
	BPF_SA_PROG_SEC("cgroup/bind6",	BPF_CGROUP_INET6_BIND),
	BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
	BPF_SA_PROG_SEC("cgroup/connect6", BPF_CGROUP_INET6_CONNECT),
	BPF_SA_PROG_SEC("cgroup/sendmsg4", BPF_CGROUP_UDP4_SENDMSG),
	BPF_SA_PROG_SEC("cgroup/sendmsg6", BPF_CGROUP_UDP6_SENDMSG),
	BPF_S_PROG_SEC("cgroup/post_bind4", BPF_CGROUP_INET4_POST_BIND),
	BPF_S_PROG_SEC("cgroup/post_bind6", BPF_CGROUP_INET6_POST_BIND),
};

#undef BPF_PROG_SEC
#undef BPF_PROG_SEC_FULL
#undef BPF_S_PROG_SEC
#undef BPF_SA_PROG_SEC
int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			     enum bpf_attach_type *expected_attach_type)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
		if (strncmp(name, section_names[i].sec, section_names[i].len))
			continue;
		*prog_type = section_names[i].prog_type;
		*expected_attach_type = section_names[i].expected_attach_type;
		return 0;
	}
	return -EINVAL;
}

static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}
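
/*
 * Usage sketch (illustrative only): mapping an ELF section name to a program
 * type, the same lookup bpf_prog_load_xattr() falls back to when the caller
 * leaves prog_type as BPF_PROG_TYPE_UNSPEC.  The table comparison is a
 * prefix match, so "kprobe/sys_open" resolves via the "kprobe/" entry.
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *
 *	if (libbpf_prog_type_by_name("cgroup/connect4", &prog_type,
 *				     &attach_type))
 *		return -1;
 *	// prog_type == BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 *	// attach_type == BPF_CGROUP_INET4_CONNECT
 */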
int bpf_map__fd(struct bpf_map *map)
{
	return map ? map->fd : -EINVAL;
}

const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
{
	return map ? &map->def : ERR_PTR(-EINVAL);
}

const char *bpf_map__name(struct bpf_map *map)
{
	return map ? map->name : NULL;
}

uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map)
{
	return map ? map->btf_key_type_id : 0;
}

uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map)
{
	return map ? map->btf_value_type_id : 0;
}

int bpf_map__set_priv(struct bpf_map *map, void *priv,
		      bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return -EINVAL;

	if (map->priv) {
		if (map->clear_priv)
			map->clear_priv(map, map->priv);
	}

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

void *bpf_map__priv(struct bpf_map *map)
{
	return map ? map->priv : ERR_PTR(-EINVAL);
}

bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
struct bpf_map *
bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
{
	size_t idx;
	struct bpf_map *s, *e;

	if (!obj || !obj->maps)
		return NULL;

	s = obj->maps;
	e = obj->maps + obj->nr_maps;

	if (prev == NULL)
		return s;

	if ((prev < s) || (prev >= e)) {
		pr_warning("error in %s: map handler doesn't belong to object\n",
			   __func__);
		return NULL;
	}

	idx = (prev - obj->maps) + 1;
	if (idx >= obj->nr_maps)
		return NULL;
	return &obj->maps[idx];
}

struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
	struct bpf_map *pos;

	bpf_map__for_each(pos, obj) {
		if (pos->name && !strcmp(pos->name, name))
			return pos;
	}
	return NULL;
}

struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
	int i;

	for (i = 0; i < obj->nr_maps; i++) {
		if (obj->maps[i].offset == offset)
			return &obj->maps[i];
	}
	return ERR_PTR(-ENOENT);
}
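
/*
 * Usage sketch (illustrative only): fetching the fd of a loaded map by name
 * so the caller can use it with bpf_map_update_elem() and friends from
 * bpf.h.  The map name "events" is a placeholder.
 *
 *	struct bpf_map *map;
 *	int map_fd;
 *
 *	map = bpf_object__find_map_by_name(obj, "events");
 *	if (!map)
 *		return -1;
 *	map_fd = bpf_map__fd(map);
 *	if (map_fd < 0)
 *		return -1;
 */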
long libbpf_get_error(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}

int bpf_prog_load(const char *file, enum bpf_prog_type type,
		  struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_prog_load_attr attr;

	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
	attr.file = file;
	attr.prog_type = type;
	attr.expected_attach_type = 0;

	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
}
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_program *prog, *first_prog = NULL;
	enum bpf_attach_type expected_attach_type;
	enum bpf_prog_type prog_type;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return -EINVAL;
	if (!attr->file)
		return -EINVAL;

	obj = __bpf_object__open(attr->file, NULL, 0,
				 bpf_prog_type__needs_kver(attr->prog_type));
	if (IS_ERR_OR_NULL(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		/*
		 * If type is not specified, try to guess it based on
		 * section name.
		 */
		prog_type = attr->prog_type;
		prog->prog_ifindex = attr->ifindex;
		expected_attach_type = attr->expected_attach_type;
		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
			err = bpf_program__identify_section(prog, &prog_type,
							    &expected_attach_type);
			if (err < 0) {
				pr_warning("failed to guess program type based on section name %s\n",
					   prog->section_name);
				bpf_object__close(obj);
				return -EINVAL;
			}
		}

		bpf_program__set_type(prog, prog_type);
		bpf_program__set_expected_attach_type(prog,
						      expected_attach_type);

		if (!bpf_program__is_function_storage(prog, obj) && !first_prog)
			first_prog = prog;
	}

	bpf_map__for_each(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warning("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return -EINVAL;
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}
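
/*
 * Usage sketch (illustrative only): the convenience path used by samples,
 * loading an object in one call and getting back the fd of its first
 * program.  The file name is a placeholder; attaching the returned fd
 * (e.g. to an XDP device or a perf event) is outside this file.
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	if (bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
 *		return -1;
 *	// prog_fd is ready to be attached; obj owns the maps and programs
 *	// until bpf_object__close(obj) is called.
 */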
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mem, unsigned long size,
			   unsigned long page_size, void **buf, size_t *buf_len,
			   bpf_perf_event_print_t fn, void *priv)
{
	volatile struct perf_event_mmap_page *header = mem;
	__u64 data_tail = header->data_tail;
	__u64 data_head = header->data_head;
	void *base, *begin, *end;
	/* default to "keep polling" in case the loop consumes no record */
	int ret = LIBBPF_PERF_EVENT_CONT;

	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
	if (data_head == data_tail)
		return LIBBPF_PERF_EVENT_CONT;

	base = ((char *)header) + page_size;

	begin = base + data_tail % size;
	end = base + data_head % size;

	while (begin != end) {
		struct perf_event_header *ehdr;

		ehdr = begin;
		if (begin + ehdr->size > base + size) {
			long len = base + size - begin;

			if (*buf_len < ehdr->size) {
				free(*buf);
				*buf = malloc(ehdr->size);
				if (!*buf) {
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*buf_len = ehdr->size;
			}

			memcpy(*buf, begin, len);
			memcpy(*buf + len, base, ehdr->size - len);
			ehdr = (void *)*buf;
			begin = base + ehdr->size - len;
		} else if (begin + ehdr->size == base + size) {
			begin = base;
		} else {
			begin += ehdr->size;
		}

		ret = fn(ehdr, priv);
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;

		data_tail += ehdr->size;
	}

	__sync_synchronize(); /* smp_mb() */
	header->data_tail = data_tail;

	return ret;
}
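
/*
 * Usage sketch (illustrative only): a print callback for
 * bpf_perf_event_read_simple().  The caller is assumed to have mmap()ed a
 * perf ring of ring_size bytes plus one header page and to pass a
 * heap-allocated scratch buffer, which this helper may reallocate for
 * records that wrap around the ring; the callback signature is assumed to
 * match the bpf_perf_event_print_t typedef in libbpf.h.
 *
 *	static enum bpf_perf_event_ret
 *	print_event(void *event, void *priv)
 *	{
 *		struct perf_event_header *hdr = event;
 *
 *		if (hdr->type == PERF_RECORD_SAMPLE)
 *			; // decode the sample payload following the header
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 *
 *	void *buf = NULL;
 *	size_t buf_len = 0;
 *	enum bpf_perf_event_ret ret;
 *
 *	ret = bpf_perf_event_read_simple(ring_base, ring_size, page_size,
 *					 &buf, &buf_len, print_event, NULL);
 */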