test_progs.c

/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>

#include <linux/types.h>
typedef __u16 __sum16;
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/unistd.h>

#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <fcntl.h>

#include <linux/bpf.h>
#include <linux/err.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "test_iptunnel_common.h"
#include "bpf_util.h"
#include "bpf_endian.h"

static int error_cnt, pass_cnt;

#define MAGIC_BYTES 123

/* plain userspace builds may not have __packed from the kernel headers */
#ifndef __packed
#define __packed __attribute__((__packed__))
#endif

/* ipv4 test vector */
static struct {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed pkt_v4 = {
	.eth.h_proto = bpf_htons(ETH_P_IP),
	.iph.ihl = 5,
	.iph.protocol = 6,	/* TCP */
	.iph.tot_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

/* ipv6 test vector */
static struct {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed pkt_v6 = {
	.eth.h_proto = bpf_htons(ETH_P_IPV6),
	.iph.nexthdr = 6,	/* TCP */
	.iph.payload_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

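/*
 * Layout note: with __packed, pkt_v4 is a 54-byte frame (14-byte
 * Ethernet + 20-byte IPv4 + 20-byte TCP header) and pkt_v6 is 74 bytes
 * (14 + 40 + 20).  The size checks in the tests below are derived from
 * these layouts; MAGIC_BYTES planted in the IP length fields gives
 * byte-accounting programs a known value to sum.
 */
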
#define CHECK(condition, tag, format...) ({				\
	int __ret = !!(condition);					\
	if (__ret) {							\
		error_cnt++;						\
		printf("%s:FAIL:%s ", __func__, tag);			\
		printf(format);						\
	} else {							\
		pass_cnt++;						\
		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
	}								\
	__ret;								\
})

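/*
 * CHECK() relies on a "duration" variable being in scope in the caller;
 * the tests below feed it from bpf_prog_test_run().  A minimal sketch of
 * the intended pattern (do_work() is a hypothetical helper):
 *
 *	__u32 duration = 0;
 *	int err = do_work();
 *
 *	if (CHECK(err, "do-work", "err %d errno %d\n", err, errno))
 *		return;
 */
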
static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		printf("%s:FAIL:map '%s' not found\n", test, name);
		error_cnt++;
		return -1;
	}
	return bpf_map__fd(map);
}

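/*
 * Typical use, as in the tests below: resolve a map from a just-loaded
 * object and seed it before the run ("some_map" is a placeholder name):
 *
 *	map_fd = bpf_find_map(__func__, obj, "some_map");
 *	if (map_fd < 0)
 *		goto out;
 *	bpf_map_update_elem(map_fd, &key, &value, 0);
 */
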
static void test_pkt_access(void)
{
	const char *file = "./test_pkt_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv4",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv6",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);
	bpf_object__close(obj);
}

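/*
 * test_xdp() expects the program to encapsulate the input frame: a
 * 20-byte outer IPv4 header on the IPv4 path (54 -> 74 bytes, protocol
 * IPPROTO_IPIP) and a 40-byte outer IPv6 header on the IPv6 path
 * (74 -> 114 bytes, nexthdr IPPROTO_IPV6), which is what the size and
 * header checks below verify.
 */
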
static void test_xdp(void)
{
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct vip key6 = {.protocol = 6, .family = AF_INET6};
	struct iptnl_info value4 = {.family = AF_INET};
	struct iptnl_info value6 = {.family = AF_INET6};
	const char *file = "./test_xdp.o";
	struct bpf_object *obj;
	char buf[128];
	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	__u32 duration, retval, size;
	int err, prog_fd, map_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key4, &value4, 0);
	bpf_map_update_elem(map_fd, &key6, &value6, 0);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != XDP_TX || size != 74 ||
	      iph->protocol != IPPROTO_IPIP, "ipv4",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != XDP_TX || size != 114 ||
	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
out:
	bpf_object__close(obj);
}

#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
#define VIP_NUM 5

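/*
 * The stats check at the end of test_l4lb() expects one packet and
 * MAGIC_BYTES bytes to be accounted per test_run iteration, over both
 * the IPv4 and the IPv6 runs:
 *
 *	bytes = MAGIC_BYTES * NUM_ITER * 2 = 123 * 100000 * 2
 *	pkts  = NUM_ITER * 2
 */
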
static void test_l4lb(void)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	const char *file = "./test_l4lb.o";
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	__u32 *magic = (__u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}

static void test_tcp_estats(void)
{
	const char *file = "./test_tcp_estats.o";
	int err, prog_fd;
	struct bpf_object *obj;
	__u32 duration = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	/* CHECK() already bumps error_cnt on failure */
	if (CHECK(err, "", "err %d errno %d\n", err, errno))
		return;

	bpf_object__close(obj);
}

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

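/*
 * bpf(2) passes userspace pointers in __u64 fields of union bpf_attr so
 * that 32-bit and 64-bit callers share one ABI; the intermediate cast
 * through unsigned long keeps the pointer-to-integer conversion clean
 * on 32-bit builds.
 */
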
static void test_bpf_obj_id(void)
{
	const __u64 array_magic_value = 0xfaceb00c;
	const __u32 array_key = 0;
	const int nr_iters = 2;
	const char *file = "./test_obj_id.o";
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	const char *expected_prog_name = "test_obj_id";
	const char *expected_map_name = "test_map_id";
	const __u64 nsec_per_sec = 1000000000;

	struct bpf_object *objs[nr_iters];
	int prog_fds[nr_iters], map_fds[nr_iters];
	/* +1 to test for the info_len returned by kernel */
	struct bpf_prog_info prog_infos[nr_iters + 1];
	struct bpf_map_info map_infos[nr_iters + 1];
	/* Each prog only uses one map. +1 to test nr_map_ids
	 * returned by kernel.
	 */
	__u32 map_ids[nr_iters + 1];
	char jited_insns[128], xlated_insns[128], zeros[128];
	__u32 i, next_id, info_len, nr_id_found, duration = 0;
	struct timespec real_time_ts, boot_time_ts;
	int sysctl_fd, jit_enabled = 0, err = 0;
	__u64 array_value;
	uid_t my_uid = getuid();
	time_t now, load_time;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			jit_enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	err = bpf_prog_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

	err = bpf_map_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

	for (i = 0; i < nr_iters; i++)
		objs[i] = NULL;

	/* Check bpf_obj_get_info_by_fd() */
	bzero(zeros, sizeof(zeros));
	for (i = 0; i < nr_iters; i++) {
		now = time(NULL);
		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
				    &objs[i], &prog_fds[i]);
		/* test_obj_id.o is a dumb prog. It should never fail
		 * to load.
		 */
		if (err)
			error_cnt++;
		assert(!err);

		/* Insert a magic value to the map */
		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
		assert(map_fds[i] >= 0);
		err = bpf_map_update_elem(map_fds[i], &array_key,
					  &array_magic_value, 0);
		assert(!err);

		/* Check getting map info */
		info_len = sizeof(struct bpf_map_info) * 2;
		bzero(&map_infos[i], info_len);
		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
					     &info_len);
		if (CHECK(err ||
			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
			  map_infos[i].key_size != sizeof(__u32) ||
			  map_infos[i].value_size != sizeof(__u64) ||
			  map_infos[i].max_entries != 1 ||
			  map_infos[i].map_flags != 0 ||
			  info_len != sizeof(struct bpf_map_info) ||
			  strcmp((char *)map_infos[i].name, expected_map_name),
			  "get-map-info(fd)",
			  "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
			  err, errno,
			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
			  info_len, sizeof(struct bpf_map_info),
			  map_infos[i].key_size,
			  map_infos[i].value_size,
			  map_infos[i].max_entries,
			  map_infos[i].map_flags,
			  map_infos[i].name, expected_map_name))
			goto done;

		/* Check getting prog info */
		info_len = sizeof(struct bpf_prog_info) * 2;
		bzero(&prog_infos[i], info_len);
		bzero(jited_insns, sizeof(jited_insns));
		bzero(xlated_insns, sizeof(xlated_insns));
		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
		prog_infos[i].jited_prog_len = sizeof(jited_insns);
		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
		prog_infos[i].nr_map_ids = 2;
		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
		assert(!err);
		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
		assert(!err);
		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
					     &info_len);
		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
			+ (prog_infos[i].load_time / nsec_per_sec);
		if (CHECK(err ||
			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
			  info_len != sizeof(struct bpf_prog_info) ||
			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
			  (jit_enabled &&
			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
			  !prog_infos[i].xlated_prog_len ||
			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
			  load_time < now - 60 || load_time > now + 60 ||
			  prog_infos[i].created_by_uid != my_uid ||
			  prog_infos[i].nr_map_ids != 1 ||
			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
			  strcmp((char *)prog_infos[i].name, expected_prog_name),
			  "get-prog-info(fd)",
			  "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
			  err, errno, i,
			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
			  info_len, sizeof(struct bpf_prog_info),
			  jit_enabled,
			  prog_infos[i].jited_prog_len,
			  prog_infos[i].xlated_prog_len,
			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
			  load_time, now,
			  prog_infos[i].created_by_uid, my_uid,
			  prog_infos[i].nr_map_ids, 1,
			  *(int *)prog_infos[i].map_ids, map_infos[i].id,
			  prog_infos[i].name, expected_prog_name))
			goto done;
	}

	/* Check bpf_prog_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_prog_get_next_id(next_id, &next_id)) {
		struct bpf_prog_info prog_info = {};
		__u32 saved_map_id;
		int prog_fd;

		info_len = sizeof(prog_info);

		prog_fd = bpf_prog_get_fd_by_id(next_id);
		if (prog_fd < 0 && errno == ENOENT)
			/* The bpf_prog is in the dead row */
			continue;
		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
			  "prog_fd %d next_id %d errno %d\n",
			  prog_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (prog_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		/* Negative test:
		 * prog_info.nr_map_ids = 1
		 * prog_info.map_ids = NULL
		 */
		prog_info.nr_map_ids = 1;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		if (CHECK(!err || errno != EFAULT,
			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
			  err, errno, EFAULT))
			break;
		bzero(&prog_info, sizeof(prog_info));
		info_len = sizeof(prog_info);

		saved_map_id = *(int *)(prog_infos[i].map_ids);
		prog_info.map_ids = prog_infos[i].map_ids;
		prog_info.nr_map_ids = 2;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		prog_infos[i].jited_prog_insns = 0;
		prog_infos[i].xlated_prog_insns = 0;
		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
		      memcmp(&prog_info, &prog_infos[i], info_len) ||
		      *(int *)prog_info.map_ids != saved_map_id,
		      "get-prog-info(next_id->fd)",
		      "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
		      err, errno, info_len, sizeof(struct bpf_prog_info),
		      memcmp(&prog_info, &prog_infos[i], info_len),
		      *(int *)prog_info.map_ids, saved_map_id);

		close(prog_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total prog id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

	/* Check bpf_map_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_map_get_next_id(next_id, &next_id)) {
		struct bpf_map_info map_info = {};
		int map_fd;

		info_len = sizeof(map_info);

		map_fd = bpf_map_get_fd_by_id(next_id);
		if (map_fd < 0 && errno == ENOENT)
			/* The bpf_map is in the dead row */
			continue;
		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
			  "map_fd %d next_id %u errno %d\n",
			  map_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (map_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
		assert(!err);

		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
		      memcmp(&map_info, &map_infos[i], info_len) ||
		      array_value != array_magic_value,
		      "check get-map-info(next_id->fd)",
		      "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
		      err, errno, info_len, sizeof(struct bpf_map_info),
		      memcmp(&map_info, &map_infos[i], info_len),
		      array_value, array_magic_value);

		close(map_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total map id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

done:
	for (i = 0; i < nr_iters; i++)
		bpf_object__close(objs[i]);
}

static void test_pkt_md_access(void)
{
	const char *file = "./test_pkt_md_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	bpf_object__close(obj);
}

static void test_obj_name(void)
{
	struct {
		const char *name;
		int success;
		int expected_errno;
	} tests[] = {
		/* names are limited to BPF_OBJ_NAME_LEN (16) bytes
		 * including the terminating NUL; '\n' is rejected as an
		 * invalid character.
		 */
		{ "", 1, 0 },
		{ "_123456789ABCDE", 1, 0 },
		{ "_123456789ABCDEF", 0, EINVAL },
		{ "_123456789ABCD\n", 0, EINVAL },
	};
	struct bpf_insn prog[] = {
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	__u32 duration = 0;
	int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		size_t name_len = strlen(tests[i].name) + 1;
		union bpf_attr attr;
		size_t ncopy;
		int fd;

		/* test different attr.prog_name during BPF_PROG_LOAD */
		ncopy = name_len < sizeof(attr.prog_name) ?
			name_len : sizeof(attr.prog_name);
		bzero(&attr, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
		attr.insn_cnt = 2;
		attr.insns = ptr_to_u64(prog);
		attr.license = ptr_to_u64("");
		memcpy(attr.prog_name, tests[i].name, ncopy);

		fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-prog-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);

		/* test different attr.map_name during BPF_MAP_CREATE */
		ncopy = name_len < sizeof(attr.map_name) ?
			name_len : sizeof(attr.map_name);
		bzero(&attr, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = 4;
		attr.value_size = 4;
		attr.max_entries = 1;
		attr.map_flags = 0;
		memcpy(attr.map_name, tests[i].name, ncopy);
		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-map-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);
	}
}

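/*
 * PERF_EVENT_IOC_QUERY_BPF reports the ids of the BPF programs attached
 * to a perf event.  test_tp_attach_query() attaches num_progs tracepoint
 * programs to sched:sched_switch, then cross-checks the reported ids
 * against the ids saved at load time, including the prog_cnt-only and
 * short-buffer (ENOSPC) query modes.
 */
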
static void test_tp_attach_query(void)
{
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;
	char buf[256];

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		return;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		return;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);

	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
				    &prog_fd[i]);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
			goto cleanup1;

		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
			  err, errno))
			goto cleanup1;
		saved_prog_ids[i] = prog_info.id;

		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu 0 */, -1 /* group id */,
				    0 /* flags */);
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
			  pmu_fd[i], errno))
			goto cleanup2;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 0) {
			/* check NULL prog array query */
			query->ids_len = num_progs;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 0,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 1) {
			/* try to get # of programs only */
			query->ids_len = 0;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;

			/* try a few negative tests */
			/* invalid query pointer */
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
				    (struct perf_event_query_bpf *)0x1);
			if (CHECK(!err || errno != EFAULT,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d\n", err, errno))
				goto cleanup3;

			/* not enough space */
			query->ids_len = 1;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
			goto cleanup3;
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))
				goto cleanup3;
	}

	i = num_progs - 1;
	for (; i >= 0; i--) {
cleanup3:
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
cleanup2:
		close(pmu_fd[i]);
cleanup1:
		bpf_object__close(obj[i]);
	}
	free(query);
}

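/*
 * BPF programs and maps are charged against RLIMIT_MEMLOCK, so main()
 * lifts the limit before any object is loaded.
 */
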
int main(void)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

	setrlimit(RLIMIT_MEMLOCK, &rinf);

	test_pkt_access();
	test_xdp();
	test_l4lb();
	test_tcp_estats();
	test_bpf_obj_id();
	test_pkt_md_access();
	test_obj_name();
	test_tp_attach_query();

	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}