
/*
 * bpf-loader.c
 *
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/err.h>
#include <linux/string.h>
#include "perf.h"
#include "debug.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "llvm-utils.h"
#include "c++/clang-c.h"

#define DEFINE_PRINT_FN(name, level) \
static int libbpf_##name(const char *fmt, ...) \
{ \
	va_list args; \
	int ret; \
\
	va_start(args, fmt); \
	ret = veprintf(level, verbose, pr_fmt(fmt), args);\
	va_end(args); \
	return ret; \
}

DEFINE_PRINT_FN(warning, 1)
DEFINE_PRINT_FN(info, 1)
DEFINE_PRINT_FN(debug, 1)

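/*
 * Per-program private data attached to each bpf_program: either the
 * tracepoint identity (sys_name/evt_name) or the parsed probe event,
 * plus the prologue state used when the program is loaded.
 */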
struct bpf_prog_priv {
	bool is_tp;
	char *sys_name;
	char *evt_name;
	struct perf_probe_event pev;
	bool need_prologue;
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
};

static bool libbpf_initialized;

struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_warning,
				 libbpf_info,
				 libbpf_debug);
		libbpf_initialized = true;
	}

	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	return obj;
}

struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_warning,
				 libbpf_info,
				 libbpf_debug);
		libbpf_initialized = true;
	}

	if (source) {
		int err;
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_warning("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");
		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);

		if (!IS_ERR(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		free(obj_buf);
	} else
		obj = bpf_object__open(filename);

	if (IS_ERR(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	return obj;
}

void bpf__clear(void)
{
	struct bpf_object *obj, *tmp;

	bpf_object__for_each_safe(obj, tmp) {
		bpf__unprobe(obj);
		bpf_object__close(obj);
	}
}

static void
clear_prog_priv(struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}

static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = true;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = false;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	int err;
	bool bool_value;

	if (!pbool)
		return -EINVAL;
	err = strtobool(value, &bool_value);
	if (err)
		return err;
	*pbool = invert ? !bool_value : bool_value;
	return 0;
}

static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}

static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key = "exec",
		.usage = "exec=<full path of file>",
		.desc = "Set uprobe target",
		.func = prog_config__exec,
	},
	{
		.key = "module",
		.usage = "module=<module name> ",
		.desc = "Set kprobe module",
		.func = prog_config__module,
	},
	{
		.key = "inlines",
		.usage = "inlines=[yes|no] ",
		.desc = "Probe at inline symbol",
		.func = prog_config__inlines,
	},
	{
		.key = "force",
		.usage = "force=[yes|no] ",
		.desc = "Forcibly add events with existing name",
		.func = prog_config__force,
	},
};

static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}

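/*
 * Consume the leading "key=value;" pairs of a section name, handing each
 * pair to do_prog_config(), and return a pointer into config_str at the
 * remaining probe/tracepoint definition.
 */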
static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}

static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);
	*p_main_str = main_str;

	if (!strchr(main_str, '=')) {
		/* Is a tracepoint event? */
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}

		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed, no need to clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}

static int
config_bpf_program(struct bpf_program *prog)
{
	struct perf_probe_event *pev = NULL;
	struct bpf_prog_priv *priv = NULL;
	const char *config_str, *main_str;
	bool is_tp = false;
	int err;

	/* Initialize per-program probing setting */
	probe_conf.no_inlines = false;
	probe_conf.force_add = false;

	config_str = bpf_program__title(prog, false);
	if (IS_ERR(config_str)) {
		pr_debug("bpf: unable to get title for program\n");
		return PTR_ERR(config_str);
	}

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		pr_debug("bpf: failed to alloc priv\n");
		return -ENOMEM;
	}
	pev = &priv->pev;

	pr_debug("bpf: config program '%s'\n", config_str);
	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
	if (err)
		goto errout;

	if (is_tp) {
		char *s = strchr(main_str, ':');

		priv->is_tp = true;
		priv->sys_name = strndup(main_str, s - main_str);
		priv->evt_name = strdup(s + 1);
		goto set_priv;
	}

	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
			 config_str, PERF_BPF_PROBE_GROUP);
		err = -BPF_LOADER_ERRNO__GROUP;
		goto errout;
	} else if (!pev->group)
		pev->group = strdup(PERF_BPF_PROBE_GROUP);

	if (!pev->group) {
		pr_debug("bpf: strdup failed\n");
		err = -ENOMEM;
		goto errout;
	}

	if (!pev->event) {
		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
			 config_str);
		err = -BPF_LOADER_ERRNO__EVENTNAME;
		goto errout;
	}
	pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
	err = bpf_program__set_priv(prog, priv, clear_prog_priv);
	if (err) {
		pr_debug("Failed to set priv for program '%s'\n", config_str);
		goto errout;
	}

	return 0;

errout:
	if (pev)
		clear_perf_probe_event(pev);
	free(priv);
	return err;
}

static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so if init failed the first time,
	 * bpf__prepare_probe() fails each time without calling
	 * init_probe_symbol_maps multiple times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}

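/*
 * Preprocessor callback: for prologue type 'n', find a probe_trace_event
 * of that type, generate its argument-fetching prologue and prepend it to
 * the original instructions.
 */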
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR(priv) || !priv || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev that belongs to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__title(prog, false);
		if (!title)
			title = "[unknown]";

		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}

/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it, but this margin is too narrow to contain the proof.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}
	return 0;
}

/*
 * Assign a type number to each tev in a pev.
 * mapping is an array with the same number of slots as tevs in that pev.
 * nr_types will be set to the number of types.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;
	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}

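/*
 * Decide whether this program needs a prologue (i.e. any of its tevs has
 * arguments); if so, allocate the instruction buffer and type mapping and
 * register preproc_gen_prologue as the program's preprocessor.
 */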
static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR(priv) || !priv) {
		pr_debug("Internal error when hook preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since none of the tevs has an argument, we don't need to
	 * generate a prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}

int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = bpf_program__priv(prog);
		if (IS_ERR(priv) || !priv) {
			err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_tracepoint(prog);
			continue;
		}

		bpf_program__set_kprobe(prog);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events");
			goto out;
		}

		/*
		 * After probing, let's consider the prologue, which
		 * adds an argument fetcher to BPF programs.
		 *
		 * hook_load_preprocessor() hooks a pre-processor to the
		 * bpf_program, letting it generate the prologue
		 * dynamically during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}

#define EVENTS_WRITE_BUFSIZE 4096
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		int i;

		if (IS_ERR(priv) || !priv || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}

int bpf__load(struct bpf_object *obj)
{
	int err;

	err = bpf_object__load(obj);
	if (err) {
		pr_debug("bpf: load objects failed\n");
		return err;
	}
	return 0;
}

int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR(priv) || !priv) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, arg);
			if (err) {
				pr_debug("bpf: tracepoint call back failed, stop iterate\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, arg);
			if (err) {
				pr_debug("bpf: call back failed, stop iterate\n");
				return err;
			}
		}
	}
	return 0;
}

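/*
 * Map configuration requested on the command line is recorded as a list
 * of bpf_map_op entries attached to each map: every op sets either a
 * plain value or an evsel, for all keys or for the given index ranges,
 * and is applied to the kernel map in bpf__apply_obj_config().
 */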
enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		struct parse_events_array array;
	} k;
	union {
		u64 value;
		struct perf_evsel *evsel;
	} v;
};

struct bpf_map_priv {
	struct list_head ops_list;
};

static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}

static void
bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}

static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("Not enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}

static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = zalloc(sizeof(*op));
	if (!op) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&op->list);

	err = bpf_map_op_setkey(op, term);
	if (err) {
		free(op);
		return ERR_PTR(err);
	}
	return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("Not enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}

static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("Not enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}

static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = bpf_map_op__new(term);
	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (err) {
		bpf_map_op__delete(op);
		return ERR_PTR(err);
	}
	return op;
}

static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (def->value_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}

static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct perf_evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}

static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	}

	/*
	 * No need to check key_size and value_size:
	 * kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}

static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct perf_evlist *evlist)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}

struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct perf_evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};

static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	const struct bpf_map_def *def;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= def->max_entries) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}

static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct perf_evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	free(map_name);
	if (!err)
		*key_scan_pos += strlen(map_opt);
	return err;
}

int bpf__config_obj(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct perf_evlist *evlist,
		    int *error_pos)
{
	int key_scan_pos = 0;
	int err;

	if (!obj || !term || !term->config)
		return -EINVAL;

	if (!prefixcmp(term->config, "map:")) {
		key_scan_pos = sizeof("map:") - 1;
		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
		goto out;
	}
	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
	if (error_pos)
		*error_pos = key_scan_pos;
	return err;
}

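/*
 * Callback type used when walking the configured keys of a map: invoked
 * once per key with the map fd, the map definition and the pending op.
 */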
typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map_def *pdef,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < pdef->max_entries; i++) {
		err = func(name, map_fd, pdef, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
			return err;
		}
	}
	return 0;
}

static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map_def *pdef,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, pdef, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}

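/*
 * Walk every op attached to a map and apply 'func' to each key the op
 * covers, dispatching on the map type and the op's key type.
 */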
static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def,
							       op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}

static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}

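/*
 * Store the perf event fd of 'evsel' into the PERF_EVENT_ARRAY slot
 * selected by 'pkey', after checking the event's dimension, inherit
 * flag and type.
 */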
static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct perf_evsel *evsel)
{
	struct xyarray *xy = evsel->fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (perf_evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map_def *pdef,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
	int err;

	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 pdef->value_size,
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
		err = apply_config_evsel_for_key(name, map_fd, pkey,
						 op->v.evsel);
		break;
	default:
		pr_debug("ERROR: unknown value type for '%s'\n", name);
		err = -BPF_LOADER_ERRNO__INTERNAL;
	}
	return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	bpf_map__for_each(map, obj) {
		err = apply_obj_config_map(map);
		if (err)
			return err;
	}
	return 0;
}

int bpf__apply_obj_config(void)
{
	struct bpf_object *obj, *tmp;
	int err;

	bpf_object__for_each_safe(obj, tmp) {
		err = apply_obj_config_object(obj);
		if (err)
			return err;
	}
	return 0;
}

#define bpf__for_each_map(pos, obj, objtmp) \
	bpf_object__for_each_safe(obj, objtmp) \
		bpf_map__for_each(pos, obj)

#define bpf__for_each_stdout_map(pos, obj, objtmp) \
	bpf__for_each_map(pos, obj, objtmp) \
		if (bpf_map__name(pos) && \
			(strcmp("__bpf_stdout__", \
				bpf_map__name(pos)) == 0))

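/*
 * Make sure every "__bpf_stdout__" map ends up with an evsel attached:
 * reuse the priv of an already-configured map as a template when one
 * exists, otherwise create a bpf-output event and bind the new evsel to
 * the remaining unconfigured maps.
 */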
int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct perf_evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return -BPF_LOADER_ERRNO__INTERNAL;

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return 0;

	if (!tmpl_priv) {
		err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/",
				   NULL);
		if (err) {
			pr_debug("ERROR: failed to create bpf-output event\n");
			return -err;
		}

		evsel = perf_evlist__last(evlist);
	}

	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return -BPF_LOADER_ERRNO__INTERNAL;
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return -ENOMEM;

			err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return err;
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return PTR_ERR(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return 0;
}

#define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)] = "Invalid config string",
	[ERRCODE_OFFSET(GROUP)] = "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)] = "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)] = "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)] = "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)] = "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)] = "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)] = "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)] = "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)] = "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)] = "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)] = "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)] = "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)] = "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)] = "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)] = "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)] = "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)] = "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)] = "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)] = "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)] = "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)] = "Index too large",
};

static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}

#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';

int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}

int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exist. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct perf_evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}