bpf-loader.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875
  1. /*
  2. * bpf-loader.c
  3. *
  4. * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  5. * Copyright (C) 2015 Huawei Inc.
  6. */
  7. #include <linux/bpf.h>
  8. #include <bpf/libbpf.h>
  9. #include <linux/err.h>
  10. #include <linux/string.h>
  11. #include "perf.h"
  12. #include "debug.h"
  13. #include "bpf-loader.h"
  14. #include "bpf-prologue.h"
  15. #include "llvm-utils.h"
  16. #include "probe-event.h"
  17. #include "probe-finder.h" // for MAX_PROBES
  18. #include "llvm-utils.h"
/*
 * Route libbpf's internal warning/info/debug messages through perf's
 * eprintf machinery so they honour the global 'verbose' level.
 * All three are currently registered at verbosity level 1.
 */
#define DEFINE_PRINT_FN(name, level) \
static int libbpf_##name(const char *fmt, ...)	\
{								\
	va_list args;						\
	int ret;						\
								\
	va_start(args, fmt);					\
	ret = veprintf(level, verbose, pr_fmt(fmt), args);\
	va_end(args);						\
	return ret;						\
}

DEFINE_PRINT_FN(warning, 1)
DEFINE_PRINT_FN(info, 1)
DEFINE_PRINT_FN(debug, 1)
/*
 * Per-program private data, attached to each struct bpf_program via
 * bpf_program__set_private() and released by bpf_prog_priv__clear().
 */
struct bpf_prog_priv {
	struct perf_probe_event pev;	/* probe point parsed from section name */
	bool need_prologue;		/* true if any tev has arguments to fetch */
	struct bpf_insn *insns_buf;	/* scratch buffer for generated prologue */
	int nr_types;			/* number of distinct prologue types */
	int *type_mapping;		/* tev index -> prologue type number */
};

/* Guards the one-time registration of the libbpf print callbacks. */
static bool libbpf_initialized;
  41. struct bpf_object *
  42. bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
  43. {
  44. struct bpf_object *obj;
  45. if (!libbpf_initialized) {
  46. libbpf_set_print(libbpf_warning,
  47. libbpf_info,
  48. libbpf_debug);
  49. libbpf_initialized = true;
  50. }
  51. obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
  52. if (IS_ERR(obj)) {
  53. pr_debug("bpf: failed to load buffer\n");
  54. return ERR_PTR(-EINVAL);
  55. }
  56. return obj;
  57. }
  58. struct bpf_object *bpf__prepare_load(const char *filename, bool source)
  59. {
  60. struct bpf_object *obj;
  61. if (!libbpf_initialized) {
  62. libbpf_set_print(libbpf_warning,
  63. libbpf_info,
  64. libbpf_debug);
  65. libbpf_initialized = true;
  66. }
  67. if (source) {
  68. int err;
  69. void *obj_buf;
  70. size_t obj_buf_sz;
  71. err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
  72. if (err)
  73. return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
  74. obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
  75. free(obj_buf);
  76. } else
  77. obj = bpf_object__open(filename);
  78. if (IS_ERR(obj)) {
  79. pr_debug("bpf: failed to load %s\n", filename);
  80. return obj;
  81. }
  82. return obj;
  83. }
  84. void bpf__clear(void)
  85. {
  86. struct bpf_object *obj, *tmp;
  87. bpf_object__for_each_safe(obj, tmp) {
  88. bpf__unprobe(obj);
  89. bpf_object__close(obj);
  90. }
  91. }
/*
 * Destructor for struct bpf_prog_priv; registered as the clear callback
 * in bpf_program__set_private().  Releases the probe event, the
 * prologue instruction buffer and the type mapping, then priv itself.
 */
static void
bpf_prog_priv__clear(struct bpf_program *prog __maybe_unused,
		     void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->type_mapping);
	free(priv);
}
  102. static int
  103. config__exec(const char *value, struct perf_probe_event *pev)
  104. {
  105. pev->uprobes = true;
  106. pev->target = strdup(value);
  107. if (!pev->target)
  108. return -ENOMEM;
  109. return 0;
  110. }
  111. static int
  112. config__module(const char *value, struct perf_probe_event *pev)
  113. {
  114. pev->uprobes = false;
  115. pev->target = strdup(value);
  116. if (!pev->target)
  117. return -ENOMEM;
  118. return 0;
  119. }
  120. static int
  121. config__bool(const char *value,
  122. bool *pbool, bool invert)
  123. {
  124. int err;
  125. bool bool_value;
  126. if (!pbool)
  127. return -EINVAL;
  128. err = strtobool(value, &bool_value);
  129. if (err)
  130. return err;
  131. *pbool = invert ? !bool_value : bool_value;
  132. return 0;
  133. }
/* "inlines=[yes|no]": yes allows probing inline symbols (clears no_inlines). */
static int
config__inlines(const char *value,
		struct perf_probe_event *pev __maybe_unused)
{
	return config__bool(value, &probe_conf.no_inlines, true);
}
/* "force=[yes|no]": forcibly add events even if the name already exists. */
static int
config__force(const char *value,
	      struct perf_probe_event *pev __maybe_unused)
{
	return config__bool(value, &probe_conf.force_add, false);
}
/*
 * Table of recognized "key=value" config terms that may prefix a BPF
 * program's section name.  do_config() dispatches on .key; .usage and
 * .desc are only printed as hints when an unknown key is seen.
 */
static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_config_terms[] = {
	{
		.key	= "exec",
		.usage	= "exec=<full path of file>",
		.desc	= "Set uprobe target",
		.func	= config__exec,
	},
	{
		.key	= "module",
		.usage	= "module=<module name> ",
		.desc	= "Set kprobe module",
		.func	= config__module,
	},
	{
		.key	= "inlines",
		.usage	= "inlines=[yes|no] ",
		.desc	= "Probe at inline symbol",
		.func	= config__inlines,
	},
	{
		.key	= "force",
		.usage	= "force=[yes|no] ",
		.desc	= "Forcibly add events with existing name",
		.func	= config__force,
	},
};
  177. static int
  178. do_config(const char *key, const char *value,
  179. struct perf_probe_event *pev)
  180. {
  181. unsigned int i;
  182. pr_debug("config bpf program: %s=%s\n", key, value);
  183. for (i = 0; i < ARRAY_SIZE(bpf_config_terms); i++)
  184. if (strcmp(key, bpf_config_terms[i].key) == 0)
  185. return bpf_config_terms[i].func(value, pev);
  186. pr_debug("BPF: ERROR: invalid config option in object: %s=%s\n",
  187. key, value);
  188. pr_debug("\nHint: Currently valid options are:\n");
  189. for (i = 0; i < ARRAY_SIZE(bpf_config_terms); i++)
  190. pr_debug("\t%s:\t%s\n", bpf_config_terms[i].usage,
  191. bpf_config_terms[i].desc);
  192. pr_debug("\n");
  193. return -BPF_LOADER_ERRNO__CONFIG_TERM;
  194. }
/*
 * Consume leading "key=value;" terms from config_str, applying each via
 * do_config(), and return a pointer into config_str just past the last
 * consumed term (the remaining probe definition).  Terms without '=' are
 * warned about and skipped.  Returns ERR_PTR(-ENOMEM) if the working
 * copy can't be allocated, or ERR_PTR of the first do_config() error.
 */
static const char *
parse_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("No enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		/*
		 * (line - text) is how far we advanced in the mutable copy;
		 * apply the same offset to the caller's original string.
		 */
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}
  229. static int
  230. parse_config(const char *config_str, struct perf_probe_event *pev)
  231. {
  232. int err;
  233. const char *main_str = parse_config_kvpair(config_str, pev);
  234. if (IS_ERR(main_str))
  235. return PTR_ERR(main_str);
  236. err = parse_perf_probe_command(main_str, pev);
  237. if (err < 0) {
  238. pr_debug("bpf: '%s' is not a valid config string\n",
  239. config_str);
  240. /* parse failed, don't need clear pev. */
  241. return -BPF_LOADER_ERRNO__CONFIG;
  242. }
  243. return 0;
  244. }
  245. static int
  246. config_bpf_program(struct bpf_program *prog)
  247. {
  248. struct perf_probe_event *pev = NULL;
  249. struct bpf_prog_priv *priv = NULL;
  250. const char *config_str;
  251. int err;
  252. /* Initialize per-program probing setting */
  253. probe_conf.no_inlines = false;
  254. probe_conf.force_add = false;
  255. config_str = bpf_program__title(prog, false);
  256. if (IS_ERR(config_str)) {
  257. pr_debug("bpf: unable to get title for program\n");
  258. return PTR_ERR(config_str);
  259. }
  260. priv = calloc(sizeof(*priv), 1);
  261. if (!priv) {
  262. pr_debug("bpf: failed to alloc priv\n");
  263. return -ENOMEM;
  264. }
  265. pev = &priv->pev;
  266. pr_debug("bpf: config program '%s'\n", config_str);
  267. err = parse_config(config_str, pev);
  268. if (err)
  269. goto errout;
  270. if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
  271. pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
  272. config_str, PERF_BPF_PROBE_GROUP);
  273. err = -BPF_LOADER_ERRNO__GROUP;
  274. goto errout;
  275. } else if (!pev->group)
  276. pev->group = strdup(PERF_BPF_PROBE_GROUP);
  277. if (!pev->group) {
  278. pr_debug("bpf: strdup failed\n");
  279. err = -ENOMEM;
  280. goto errout;
  281. }
  282. if (!pev->event) {
  283. pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
  284. config_str);
  285. err = -BPF_LOADER_ERRNO__EVENTNAME;
  286. goto errout;
  287. }
  288. pr_debug("bpf: config '%s' is ok\n", config_str);
  289. err = bpf_program__set_private(prog, priv, bpf_prog_priv__clear);
  290. if (err) {
  291. pr_debug("Failed to set priv for program '%s'\n", config_str);
  292. goto errout;
  293. }
  294. return 0;
  295. errout:
  296. if (pev)
  297. clear_perf_probe_event(pev);
  298. free(priv);
  299. return err;
  300. }
  301. static int bpf__prepare_probe(void)
  302. {
  303. static int err = 0;
  304. static bool initialized = false;
  305. /*
  306. * Make err static, so if init failed the first, bpf__prepare_probe()
  307. * fails each time without calling init_probe_symbol_maps multiple
  308. * times.
  309. */
  310. if (initialized)
  311. return err;
  312. initialized = true;
  313. err = init_probe_symbol_maps(false);
  314. if (err < 0)
  315. pr_debug("Failed to init_probe_symbol_maps\n");
  316. probe_conf.max_probes = MAX_PROBES;
  317. return err;
  318. }
/*
 * Preprocessor callback registered via bpf_program__set_prep(): invoked
 * once per prologue type 'n'.  Picks any tev of that type, generates an
 * argument-fetching prologue into priv->insns_buf and appends the
 * original instructions after it.  The combined buffer is handed back
 * to libbpf through 'res'.
 */
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_prog_priv *priv;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	err = bpf_program__get_private(prog, (void **)&priv);
	if (err || !priv)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev belongs to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	/* Prologue must leave room for the original instructions */
	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__title(prog, false);
		if (!title)
			title = "[unknown]";
		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	/* Original program follows the generated prologue */
	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}
/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can proof it but this margin is too narrow to contain.
 *
 * qsort comparator over (struct probe_trace_event *) elements: orders
 * by descending nargs first, then by each argument's value string and
 * its chain of reference offsets.  Two tevs compare equal exactly when
 * they would generate identical prologues, so map_prologue() can give
 * them the same type number.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		/* Compare the two reference chains element by element */
		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		/* Shorter chain sorts after the longer one */
		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}
	return 0;
}
  405. /*
  406. * Assign a type number to each tevs in a pev.
  407. * mapping is an array with same slots as tevs in that pev.
  408. * nr_types will be set to number of types.
  409. */
  410. static int map_prologue(struct perf_probe_event *pev, int *mapping,
  411. int *nr_types)
  412. {
  413. int i, type = 0;
  414. struct probe_trace_event **ptevs;
  415. size_t array_sz = sizeof(*ptevs) * pev->ntevs;
  416. ptevs = malloc(array_sz);
  417. if (!ptevs) {
  418. pr_debug("No ehough memory: alloc ptevs failed\n");
  419. return -ENOMEM;
  420. }
  421. pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
  422. for (i = 0; i < pev->ntevs; i++)
  423. ptevs[i] = &pev->tevs[i];
  424. qsort(ptevs, pev->ntevs, sizeof(*ptevs),
  425. compare_tev_args);
  426. for (i = 0; i < pev->ntevs; i++) {
  427. int n;
  428. n = ptevs[i] - pev->tevs;
  429. if (i == 0) {
  430. mapping[n] = type;
  431. pr_debug("mapping[%d]=%d\n", n, type);
  432. continue;
  433. }
  434. if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
  435. mapping[n] = type;
  436. else
  437. mapping[n] = ++type;
  438. pr_debug("mapping[%d]=%d\n", n, mapping[n]);
  439. }
  440. free(ptevs);
  441. *nr_types = type + 1;
  442. return 0;
  443. }
/*
 * Decide whether this program needs a dynamically generated prologue
 * (i.e. at least one tev fetches arguments).  If so, allocate the
 * instruction scratch buffer and the tev->type mapping, compute the
 * type numbers and register preproc_gen_prologue() with one program
 * variant per prologue type.  Buffers allocated here are owned by priv
 * and released by bpf_prog_priv__clear().
 */
static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct perf_probe_event *pev;
	struct bpf_prog_priv *priv;
	bool need_prologue = false;
	int err, i;

	err = bpf_program__get_private(prog, (void **)&priv);
	if (err || !priv) {
		pr_debug("Internal error when hook preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	pev = &priv->pev;

	/* A prologue is needed as soon as one tev has arguments */
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since all tevs don't have argument, we don't need generate
	 * prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("No enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("No enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	/* -1 marks "no type assigned yet" before map_prologue() runs */
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}
  491. int bpf__probe(struct bpf_object *obj)
  492. {
  493. int err = 0;
  494. struct bpf_program *prog;
  495. struct bpf_prog_priv *priv;
  496. struct perf_probe_event *pev;
  497. err = bpf__prepare_probe();
  498. if (err) {
  499. pr_debug("bpf__prepare_probe failed\n");
  500. return err;
  501. }
  502. bpf_object__for_each_program(prog, obj) {
  503. err = config_bpf_program(prog);
  504. if (err)
  505. goto out;
  506. err = bpf_program__get_private(prog, (void **)&priv);
  507. if (err || !priv)
  508. goto out;
  509. pev = &priv->pev;
  510. err = convert_perf_probe_events(pev, 1);
  511. if (err < 0) {
  512. pr_debug("bpf_probe: failed to convert perf probe events");
  513. goto out;
  514. }
  515. err = apply_perf_probe_events(pev, 1);
  516. if (err < 0) {
  517. pr_debug("bpf_probe: failed to apply perf probe events");
  518. goto out;
  519. }
  520. /*
  521. * After probing, let's consider prologue, which
  522. * adds program fetcher to BPF programs.
  523. *
  524. * hook_load_preprocessorr() hooks pre-processor
  525. * to bpf_program, let it generate prologue
  526. * dynamically during loading.
  527. */
  528. err = hook_load_preprocessor(prog);
  529. if (err)
  530. goto out;
  531. }
  532. out:
  533. return err < 0 ? err : 0;
  534. }
#define EVENTS_WRITE_BUFSIZE 4096
/*
 * Remove every kprobe/uprobe event previously created by bpf__probe()
 * for this object.  Best effort: a failure on one event is remembered
 * in 'ret' but doesn't stop the loop, so the return value is the last
 * error seen (or 0 when everything was removed).
 */
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;

	bpf_object__for_each_program(prog, obj) {
		int i;

		/* Programs that never got a priv have no probes to remove */
		err = bpf_program__get_private(prog, (void **)&priv);
		if (err || !priv)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			/* Delete by "group:event" filter */
			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}
  570. int bpf__load(struct bpf_object *obj)
  571. {
  572. int err;
  573. err = bpf_object__load(obj);
  574. if (err) {
  575. pr_debug("bpf: load objects failed\n");
  576. return err;
  577. }
  578. return 0;
  579. }
/*
 * Invoke func(tev, fd, arg) for every (trace event, program fd) pair in
 * obj.  Programs with a generated prologue expose one fd per prologue
 * type (selected via type_mapping); plain programs use their single fd.
 * Iteration stops at the first error, which is returned.
 */
int bpf__foreach_tev(struct bpf_object *obj,
		     bpf_prog_iter_callback_t func,
		     void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		struct bpf_prog_priv *priv;
		int i, fd;

		err = bpf_program__get_private(prog,
				(void **)&priv);
		if (err || !priv) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				/* pick the variant matching this tev's type */
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev, fd, arg);
			if (err) {
				pr_debug("bpf: call back failed, stop iterate\n");
				return err;
			}
		}
	}
	return 0;
}
/* Map a BPF loader errno onto its index in the message table below. */
#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

/* Human readable messages for the BPF_LOADER_ERRNO__* error codes. */
static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(CONFIG_TERM)]	= "Invalid config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
};
  633. static int
  634. bpf_loader_strerror(int err, char *buf, size_t size)
  635. {
  636. char sbuf[STRERR_BUFSIZE];
  637. const char *msg;
  638. if (!buf || !size)
  639. return -1;
  640. err = err > 0 ? err : -err;
  641. if (err >= __LIBBPF_ERRNO__START)
  642. return libbpf_strerror(err, buf, size);
  643. if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
  644. msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
  645. snprintf(buf, size, "%s", msg);
  646. buf[size - 1] = '\0';
  647. return 0;
  648. }
  649. if (err >= __BPF_LOADER_ERRNO__END)
  650. snprintf(buf, size, "Unknown bpf loader error %d", err);
  651. else
  652. snprintf(buf, size, "%s",
  653. strerror_r(err, sbuf, sizeof(sbuf)));
  654. buf[size - 1] = '\0';
  655. return -1;
  656. }
/*
 * Helpers for building the bpf__strerror_*() functions below.
 * bpf__strerror_head() opens a switch on the (made positive) error
 * number with a default case that copies the generic message;
 * bpf__strerror_entry() adds one case; bpf__strerror_end() closes the
 * switch and NUL-terminates the buffer.  The braces are deliberately
 * unbalanced across the three macros.
 */
#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';
  677. int bpf__strerror_prepare_load(const char *filename, bool source,
  678. int err, char *buf, size_t size)
  679. {
  680. size_t n;
  681. int ret;
  682. n = snprintf(buf, size, "Failed to load %s%s: ",
  683. filename, source ? " from source" : "");
  684. if (n >= size) {
  685. buf[size - 1] = '\0';
  686. return 0;
  687. }
  688. buf += n;
  689. size -= n;
  690. ret = bpf_loader_strerror(err, buf, size);
  691. buf[size - 1] = '\0';
  692. return ret;
  693. }
/*
 * Describe a bpf__probe() failure in buf.  Built with the
 * bpf__strerror_*() macros above, which expand to a switch on the
 * error number; unknown codes fall through to the generic message.
 */
int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__CONFIG_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exist. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}
/*
 * Describe a bpf__load() failure in buf.  For kernel version mismatch
 * (LIBBPF_ERRNO__KVER), compare the object's recorded 'version' with
 * the running kernel to give an actionable message.
 */
int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__get_kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}