/*
 * bpf.c - perf test cases exercising BPF object compilation, loading,
 * event filtering and bpffs pinning.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
#include <util/util.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"

#define NR_ITERS	   111
#define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test"
  19. #ifdef HAVE_LIBBPF_SUPPORT
  20. static int epoll_wait_loop(void)
  21. {
  22. int i;
  23. /* Should fail NR_ITERS times */
  24. for (i = 0; i < NR_ITERS; i++)
  25. epoll_wait(-(i + 1), NULL, 0, 0);
  26. return 0;
  27. }
  28. #ifdef HAVE_BPF_PROLOGUE
  29. static int llseek_loop(void)
  30. {
  31. int fds[2], i;
  32. fds[0] = open("/dev/null", O_RDONLY);
  33. fds[1] = open("/dev/null", O_RDWR);
  34. if (fds[0] < 0 || fds[1] < 0)
  35. return -1;
  36. for (i = 0; i < NR_ITERS; i++) {
  37. lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
  38. lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
  39. }
  40. close(fds[0]);
  41. close(fds[1]);
  42. return 0;
  43. }
  44. #endif
  45. static struct {
  46. enum test_llvm__testcase prog_id;
  47. const char *desc;
  48. const char *name;
  49. const char *msg_compile_fail;
  50. const char *msg_load_fail;
  51. int (*target_func)(void);
  52. int expect_result;
  53. bool pin;
  54. } bpf_testcase_table[] = {
  55. {
  56. LLVM_TESTCASE_BASE,
  57. "Basic BPF filtering",
  58. "[basic_bpf_test]",
  59. "fix 'perf test LLVM' first",
  60. "load bpf object failed",
  61. &epoll_wait_loop,
  62. (NR_ITERS + 1) / 2,
  63. false,
  64. },
  65. {
  66. LLVM_TESTCASE_BASE,
  67. "BPF pinning",
  68. "[bpf_pinning]",
  69. "fix kbuild first",
  70. "check your vmlinux setting?",
  71. &epoll_wait_loop,
  72. (NR_ITERS + 1) / 2,
  73. true,
  74. },
  75. #ifdef HAVE_BPF_PROLOGUE
  76. {
  77. LLVM_TESTCASE_BPF_PROLOGUE,
  78. "BPF prologue generation",
  79. "[bpf_prologue_test]",
  80. "fix kbuild first",
  81. "check your vmlinux setting?",
  82. &llseek_loop,
  83. (NR_ITERS + 1) / 4,
  84. false,
  85. },
  86. #endif
  87. {
  88. LLVM_TESTCASE_BPF_RELOCATION,
  89. "BPF relocation checker",
  90. "[bpf_relocation_test]",
  91. "fix 'perf test LLVM' first",
  92. "libbpf error when dealing with relocation",
  93. NULL,
  94. 0,
  95. false,
  96. },
  97. };
/*
 * Attach the events selected by the already-prepared BPF object @obj to
 * this process, run the workload @func while counting PERF_RECORD_SAMPLE
 * events, and compare the count against @expect.
 *
 * Return: TEST_OK when exactly @expect samples pass the BPF filter,
 * TEST_FAIL on any setup error or count mismatch.
 */
static int do_test(struct bpf_object *obj, int (*func)(void),
		   int expect)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.freq		      = 0,
		.mmap_pages	      = 256,
		.default_interval     = 1,
	};
	char pid[16];
	char sbuf[STRERR_BUFSIZE];
	struct perf_evlist *evlist;
	int i, ret = TEST_FAIL, err = 0, count = 0;
	struct parse_events_evlist parse_evlist;
	struct parse_events_error parse_error;

	/* Build a parse-events context holding only the BPF-selected events */
	bzero(&parse_error, sizeof(parse_error));
	bzero(&parse_evlist, sizeof(parse_evlist));
	parse_evlist.error = &parse_error;
	INIT_LIST_HEAD(&parse_evlist.list);

	err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj, NULL);
	if (err || list_empty(&parse_evlist.list)) {
		pr_debug("Failed to add events selected by BPF\n");
		return TEST_FAIL;
	}

	/* Target ourselves: the workload runs in this process */
	snprintf(pid, sizeof(pid), "%d", getpid());
	pid[sizeof(pid) - 1] = '\0';
	opts.target.tid = opts.target.pid = pid;

	/* Instead of perf_evlist__new_default, don't add default events */
	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("Not enough memory to create evlist\n");
		return TEST_FAIL;
	}

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/* Move the parsed BPF events into the evlist */
	perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
	evlist->nr_groups = parse_evlist.nr_groups;

	perf_evlist__config(evlist, &opts, NULL);

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/* Record only while the workload runs */
	perf_evlist__enable(evlist);
	(*func)();
	perf_evlist__disable(evlist);

	/* Drain every mmap ring and count samples that passed the filter */
	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;

			if (type == PERF_RECORD_SAMPLE)
				count ++;
		}
	}

	if (count != expect) {
		pr_debug("BPF filter result incorrect\n");
		goto out_delete_evlist;
	}

	ret = TEST_OK;

out_delete_evlist:
	perf_evlist__delete(evlist);
	return ret;
}
  174. static struct bpf_object *
  175. prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
  176. {
  177. struct bpf_object *obj;
  178. obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
  179. if (IS_ERR(obj)) {
  180. pr_debug("Compile BPF program failed.\n");
  181. return NULL;
  182. }
  183. return obj;
  184. }
/*
 * Run subtest @idx of bpf_testcase_table end to end:
 *   1) fetch the compiled BPF object from the "perf test LLVM" machinery,
 *   2) load it, expecting success exactly when the entry has a target_func
 *      (a NULL target_func marks an object that must FAIL to load),
 *   3) run do_test() and check the filtered sample count,
 *   4) if .pin is set, additionally pin the object under bpffs and clean up.
 *
 * Return: TEST_OK / TEST_FAIL; TEST_SKIP only when the very first
 * subtest's object cannot be fetched (compilation environment missing).
 */
static int __test__bpf(int idx)
{
	int ret;
	void *obj_buf;
	size_t obj_buf_sz;
	struct bpf_object *obj;

	ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
				       bpf_testcase_table[idx].prog_id,
				       true, NULL);
	if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
		pr_debug("Unable to get BPF object, %s\n",
			 bpf_testcase_table[idx].msg_compile_fail);
		/* Only subtest 0 failing here means "no LLVM": skip, not fail */
		if (idx == 0)
			return TEST_SKIP;
		else
			return TEST_FAIL;
	}

	obj = prepare_bpf(obj_buf, obj_buf_sz,
			  bpf_testcase_table[idx].name);
	/* Load outcome must match expectation: obj != NULL iff target_func set */
	if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
		if (!obj)
			pr_debug("Fail to load BPF object: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		else
			pr_debug("Success unexpectedly: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		ret = TEST_FAIL;
		goto out;
	}

	if (obj) {
		ret = do_test(obj,
			      bpf_testcase_table[idx].target_func,
			      bpf_testcase_table[idx].expect_result);
		if (ret != TEST_OK)
			goto out;
		if (bpf_testcase_table[idx].pin) {
			int err;

			if (!bpf_fs__mount()) {
				pr_debug("BPF filesystem not mounted\n");
				ret = TEST_FAIL;
				goto out;
			}
			/* An already existing directory is fine for pinning */
			err = mkdir(PERF_TEST_BPF_PATH, 0777);
			if (err && errno != EEXIST) {
				pr_debug("Failed to make perf_test dir: %s\n",
					 strerror(errno));
				ret = TEST_FAIL;
				goto out;
			}
			if (bpf_object__pin(obj, PERF_TEST_BPF_PATH))
				ret = TEST_FAIL;
			/* Remove the pin directory again so reruns start clean */
			if (rm_rf(PERF_TEST_BPF_PATH))
				ret = TEST_FAIL;
		}
	}

out:
	/* bpf__clear() tears down all loaded/prepared BPF state */
	bpf__clear();
	return ret;
}
  244. int test__bpf_subtest_get_nr(void)
  245. {
  246. return (int)ARRAY_SIZE(bpf_testcase_table);
  247. }
  248. const char *test__bpf_subtest_get_desc(int i)
  249. {
  250. if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
  251. return NULL;
  252. return bpf_testcase_table[i].desc;
  253. }
  254. static int check_env(void)
  255. {
  256. int err;
  257. unsigned int kver_int;
  258. char license[] = "GPL";
  259. struct bpf_insn insns[] = {
  260. BPF_MOV64_IMM(BPF_REG_0, 1),
  261. BPF_EXIT_INSN(),
  262. };
  263. err = fetch_kernel_version(&kver_int, NULL, 0);
  264. if (err) {
  265. pr_debug("Unable to get kernel version\n");
  266. return err;
  267. }
  268. err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
  269. sizeof(insns) / sizeof(insns[0]),
  270. license, kver_int, NULL, 0);
  271. if (err < 0) {
  272. pr_err("Missing basic BPF support, skip this test: %s\n",
  273. strerror(errno));
  274. return err;
  275. }
  276. close(err);
  277. return 0;
  278. }
  279. int test__bpf(int i)
  280. {
  281. int err;
  282. if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
  283. return TEST_FAIL;
  284. if (geteuid() != 0) {
  285. pr_debug("Only root can run BPF test\n");
  286. return TEST_SKIP;
  287. }
  288. if (check_env())
  289. return TEST_SKIP;
  290. err = __test__bpf(i);
  291. return err;
  292. }
  293. #else
  294. int test__bpf_subtest_get_nr(void)
  295. {
  296. return 0;
  297. }
/* Stub: without libbpf support there are no subtests to describe. */
const char *test__bpf_subtest_get_desc(int i __maybe_unused)
{
	return NULL;
}
/* Stub: report the whole BPF test as skipped when built without libbpf. */
int test__bpf(int i __maybe_unused)
{
	pr_debug("Skip BPF test because BPF support is not compiled\n");
	return TEST_SKIP;
}
  307. #endif