bpf.c
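/*
 * perf test "BPF filter": build a BPF object with the LLVM subtest, load it
 * through perf's bpf-loader, attach it to a kprobe-style event, run a small
 * workload and check that the filter passes the expected number of samples.
 *
 * A minimal sketch of how the subtest hooks below are consumed by the perf
 * test driver (the actual wiring lives in tests/builtin-test.c and may
 * differ):
 *
 *	for (i = 0; i < test__bpf_subtest_get_nr(); i++) {
 *		pr_debug("%s\n", test__bpf_subtest_get_desc(i));
 *		if (test__bpf(i) != TEST_OK)
 *			...
 *	}
 */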

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/stat.h>
#include <util/util.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"

#define NR_ITERS		111
#define PERF_TEST_BPF_PATH	"/sys/fs/bpf/perf_test"

#ifdef HAVE_LIBBPF_SUPPORT
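/*
 * Workload for the "Basic BPF filtering" and "BPF pinning" cases: issue
 * NR_ITERS epoll_wait() calls that all fail (negative fd).  The attached
 * BPF filter is expected to pass every other call, hence the
 * (NR_ITERS + 1) / 2 expected sample count in the table below.
 */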
static int epoll_wait_loop(void)
{
	int i;

	/* Should fail NR_ITERS times */
	for (i = 0; i < NR_ITERS; i++)
		epoll_wait(-(i + 1), NULL, 0, 0);
	return 0;
}

#ifdef HAVE_BPF_PROLOGUE
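/*
 * Workload for the "BPF prologue generation" case: alternate lseek() calls
 * between two /dev/null file descriptors.  The prologue-enabled filter is
 * expected to pass one call in four, matching the (NR_ITERS + 1) / 4
 * expectation in the table below.
 */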
static int llseek_loop(void)
{
	int fds[2], i;

	fds[0] = open("/dev/null", O_RDONLY);
	fds[1] = open("/dev/null", O_RDWR);

	if (fds[0] < 0 || fds[1] < 0) {
		/* Don't leak a descriptor if only one open() failed */
		if (fds[0] >= 0)
			close(fds[0]);
		if (fds[1] >= 0)
			close(fds[1]);
		return -1;
	}

	for (i = 0; i < NR_ITERS; i++) {
		lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
		lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
	}
	close(fds[0]);
	close(fds[1]);
	return 0;
}
#endif
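/*
 * Each entry describes one subtest: which LLVM-built BPF program to use,
 * the user-visible description, the section name inside the object, the
 * messages to print on compile/load failure, the workload to run, how many
 * PERF_RECORD_SAMPLE events the filter should let through, and whether the
 * pinning path under PERF_TEST_BPF_PATH should be exercised.
 */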
static struct {
	enum test_llvm__testcase prog_id;
	const char *desc;
	const char *name;
	const char *msg_compile_fail;
	const char *msg_load_fail;
	int (*target_func)(void);
	int expect_result;
	bool pin;
} bpf_testcase_table[] = {
	{
		.prog_id	  = LLVM_TESTCASE_BASE,
		.desc		  = "Basic BPF filtering",
		.name		  = "[basic_bpf_test]",
		.msg_compile_fail = "fix 'perf test LLVM' first",
		.msg_load_fail	  = "load bpf object failed",
		.target_func	  = &epoll_wait_loop,
		.expect_result	  = (NR_ITERS + 1) / 2,
		.pin		  = false,
	},
	{
		.prog_id	  = LLVM_TESTCASE_BASE,
		.desc		  = "BPF pinning",
		.name		  = "[bpf_pinning]",
		.msg_compile_fail = "fix kbuild first",
		.msg_load_fail	  = "check your vmlinux setting?",
		.target_func	  = &epoll_wait_loop,
		.expect_result	  = (NR_ITERS + 1) / 2,
		.pin		  = true,
	},
#ifdef HAVE_BPF_PROLOGUE
	{
		.prog_id	  = LLVM_TESTCASE_BPF_PROLOGUE,
		.desc		  = "BPF prologue generation",
		.name		  = "[bpf_prologue_test]",
		.msg_compile_fail = "fix kbuild first",
		.msg_load_fail	  = "check your vmlinux setting?",
		.target_func	  = &llseek_loop,
		.expect_result	  = (NR_ITERS + 1) / 4,
		.pin		  = false,
	},
#endif
	{
		.prog_id	  = LLVM_TESTCASE_BPF_RELOCATION,
		.desc		  = "BPF relocation checker",
		.name		  = "[bpf_relocation_test]",
		.msg_compile_fail = "fix 'perf test LLVM' first",
		.msg_load_fail	  = "libbpf error when dealing with relocation",
		.target_func	  = NULL,
		.expect_result	  = 0,
		.pin		  = false,
	},
};
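/*
 * Load the events described by the BPF object into a fresh evlist, mmap it,
 * run the workload with the events enabled, then count the
 * PERF_RECORD_SAMPLE events that made it through the filter and compare
 * against the expected count.
 */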
static int do_test(struct bpf_object *obj, int (*func)(void),
		   int expect)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.freq		  = 0,
		.mmap_pages	  = 256,
		.default_interval = 1,
	};

	char pid[16];
	char sbuf[STRERR_BUFSIZE];
	struct perf_evlist *evlist;
	int i, ret = TEST_FAIL, err = 0, count = 0;

	struct parse_events_evlist parse_evlist;
	struct parse_events_error parse_error;

	bzero(&parse_error, sizeof(parse_error));
	bzero(&parse_evlist, sizeof(parse_evlist));
	parse_evlist.error = &parse_error;
	INIT_LIST_HEAD(&parse_evlist.list);

	err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj, NULL);
	if (err || list_empty(&parse_evlist.list)) {
		pr_debug("Failed to add events selected by BPF\n");
		return TEST_FAIL;
	}

	snprintf(pid, sizeof(pid), "%d", getpid());
	pid[sizeof(pid) - 1] = '\0';

	opts.target.tid = opts.target.pid = pid;

	/* Instead of perf_evlist__new_default, don't add default events */
	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("Not enough memory to create evlist\n");
		return TEST_FAIL;
	}

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
	evlist->nr_groups = parse_evlist.nr_groups;

	perf_evlist__config(evlist, &opts, NULL);

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);
	(*func)();
	perf_evlist__disable(evlist);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;

			if (type == PERF_RECORD_SAMPLE)
				count++;
		}
	}

	if (count != expect) {
		pr_debug("BPF filter result incorrect\n");
		goto out_delete_evlist;
	}

	ret = TEST_OK;

out_delete_evlist:
	perf_evlist__delete(evlist);
	return ret;
}
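/*
 * Thin wrapper around bpf__prepare_load_buffer(): convert the ERR_PTR
 * convention into a plain NULL on failure so callers can simply test the
 * returned pointer.
 */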
static struct bpf_object *
prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR(obj)) {
		pr_debug("Failed to compile BPF program\n");
		return NULL;
	}
	return obj;
}
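/*
 * Run one entry of bpf_testcase_table: fetch the compiled object from the
 * LLVM subtest, load it, run the workload through do_test() and, for the
 * pinning case, pin/unpin the object under PERF_TEST_BPF_PATH.
 */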
static int __test__bpf(int idx)
{
	int ret;
	void *obj_buf;
	size_t obj_buf_sz;
	struct bpf_object *obj;

	ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
				       bpf_testcase_table[idx].prog_id,
				       true, NULL);
	if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
		pr_debug("Unable to get BPF object, %s\n",
			 bpf_testcase_table[idx].msg_compile_fail);
		if (idx == 0)
			return TEST_SKIP;
		else
			return TEST_FAIL;
	}

	obj = prepare_bpf(obj_buf, obj_buf_sz,
			  bpf_testcase_table[idx].name);
	if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
		if (!obj)
			pr_debug("Failed to load BPF object: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		else
			pr_debug("Loading succeeded unexpectedly: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		ret = TEST_FAIL;
		goto out;
	}

	if (obj) {
		ret = do_test(obj,
			      bpf_testcase_table[idx].target_func,
			      bpf_testcase_table[idx].expect_result);
		if (ret != TEST_OK)
			goto out;
		if (bpf_testcase_table[idx].pin) {
			int err;

			if (!bpf_fs__mount()) {
				pr_debug("BPF filesystem not mounted\n");
				ret = TEST_FAIL;
				goto out;
			}
			err = mkdir(PERF_TEST_BPF_PATH, 0777);
			if (err && errno != EEXIST) {
				pr_debug("Failed to make perf_test dir: %s\n",
					 strerror(errno));
				ret = TEST_FAIL;
				goto out;
			}
			if (bpf_object__pin(obj, PERF_TEST_BPF_PATH))
				ret = TEST_FAIL;
			if (rm_rf(PERF_TEST_BPF_PATH))
				ret = TEST_FAIL;
		}
	}

out:
	bpf__clear();
	return ret;
}
int test__bpf_subtest_get_nr(void)
{
	return (int)ARRAY_SIZE(bpf_testcase_table);
}

const char *test__bpf_subtest_get_desc(int i)
{
	if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
		return NULL;
	return bpf_testcase_table[i].desc;
}
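/*
 * Probe for basic BPF support by loading the smallest valid program
 * ("r0 = 1; exit") as a kprobe filter for the running kernel version.
 * If even that fails, the whole test is skipped.
 */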
static int check_env(void)
{
	int err;
	unsigned int kver_int;
	char license[] = "GPL";

	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};

	err = fetch_kernel_version(&kver_int, NULL, 0);
	if (err) {
		pr_debug("Unable to get kernel version\n");
		return err;
	}

	/* On success bpf_load_program() returns the new program fd */
	err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
			       ARRAY_SIZE(insns),
			       license, kver_int, NULL, 0);
	if (err < 0) {
		pr_err("Missing basic BPF support, skip this test: %s\n",
		       strerror(errno));
		return err;
	}
	close(err);

	return 0;
}
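/*
 * Entry point for one subtest.  Loading BPF programs needs root (and a
 * kernel with basic BPF support), so anything else is reported as a skip
 * rather than a failure.
 */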
int test__bpf(int i)
{
	if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
		return TEST_FAIL;

	if (geteuid() != 0) {
		pr_debug("Only root can run BPF test\n");
		return TEST_SKIP;
	}

	if (check_env())
		return TEST_SKIP;

	return __test__bpf(i);
}
#else
int test__bpf_subtest_get_nr(void)
{
	return 0;
}

const char *test__bpf_subtest_get_desc(int i __maybe_unused)
{
	return NULL;
}

int test__bpf(int i __maybe_unused)
{
	pr_debug("Skip BPF test because BPF support is not compiled in\n");
	return TEST_SKIP;
}
#endif