/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>

#include <linux/types.h>
typedef __u16 __sum16;
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/unistd.h>

#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <fcntl.h>

#include <linux/bpf.h>
#include <linux/err.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "test_iptunnel_common.h"
#include "bpf_util.h"
#include "bpf_endian.h"
#include "bpf_rlimit.h"

static int error_cnt, pass_cnt;

#define MAGIC_BYTES 123

/* ipv4 test vector */
static struct {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed pkt_v4 = {
	.eth.h_proto = bpf_htons(ETH_P_IP),
	.iph.ihl = 5,
	.iph.protocol = 6,
	.iph.tot_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

/* ipv6 test vector */
static struct {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed pkt_v6 = {
	.eth.h_proto = bpf_htons(ETH_P_IPV6),
	.iph.nexthdr = 6,
	.iph.payload_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};
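
/* Pass/fail reporting helper. Note that CHECK() expects a __u32
 * variable named 'duration' to be in scope at every call site; callers
 * either initialize it themselves or let bpf_prog_test_run() fill it in.
 */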
#define CHECK(condition, tag, format...) ({				\
	int __ret = !!(condition);					\
	if (__ret) {							\
		error_cnt++;						\
		printf("%s:FAIL:%s ", __func__, tag);			\
		printf(format);						\
	} else {							\
		pass_cnt++;						\
		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
	}								\
	__ret;								\
})
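
/* Look up a map by name in an already-loaded object; returns the map
 * fd, or -1 (after bumping error_cnt) if the map is missing.
 */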
static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		printf("%s:FAIL:map '%s' not found\n", test, name);
		error_cnt++;
		return -1;
	}
	return bpf_map__fd(map);
}
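
/* Run test_pkt_access.o as a SCHED_CLS program against the canned v4
 * and v6 packets and expect a zero return code from both runs.
 */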
static void test_pkt_access(void)
{
	const char *file = "./test_pkt_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv4",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv6",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);
	bpf_object__close(obj);
}
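
/* Load test_xdp.o, seed its vip2tnl map with one v4 and one v6 tunnel
 * entry, then verify that both packets come back as XDP_TX with the
 * expected encapsulated size and outer protocol.
 */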
static void test_xdp(void)
{
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct vip key6 = {.protocol = 6, .family = AF_INET6};
	struct iptnl_info value4 = {.family = AF_INET};
	struct iptnl_info value6 = {.family = AF_INET6};
	const char *file = "./test_xdp.o";
	struct bpf_object *obj;
	char buf[128];
	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	__u32 duration, retval, size;
	int err, prog_fd, map_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key4, &value4, 0);
	bpf_map_update_elem(map_fd, &key6, &value6, 0);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || errno || retval != XDP_TX || size != 74 ||
	      iph->protocol != IPPROTO_IPIP, "ipv4",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != XDP_TX || size != 114 ||
	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
out:
	bpf_object__close(obj);
}
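
/* Exercise tail adjustment (presumably bpf_xdp_adjust_tail() inside
 * test_adjust_tail.o): the v4 packet must come back as XDP_DROP and
 * the v6 packet as XDP_TX shrunk to 54 bytes.
 */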
static void test_xdp_adjust_tail(void)
{
	const char *file = "./test_adjust_tail.o";
	struct bpf_object *obj;
	char buf[128];
	__u32 duration, retval, size;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || errno || retval != XDP_DROP,
	      "ipv4", "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != XDP_TX || size != 54,
	      "ipv6", "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
	bpf_object__close(obj);
}

#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
#define VIP_NUM 5
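
/* Shared body for the L4 load-balancer tests: seed the vip_map,
 * ch_rings and reals maps, run NUM_ITER iterations over the v4 and v6
 * packets expecting TC_ACT_REDIRECT, then cross-check the per-CPU
 * "stats" map against the expected byte and packet totals.
 */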
static void test_l4lb(const char *file)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	__u32 *magic = (__u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7 /*TC_ACT_REDIRECT*/ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7 /*TC_ACT_REDIRECT*/ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}
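
/* Run the l4lb scenario against both the inlined and the noinline
 * builds of the same program.
 */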
static void test_l4lb_all(void)
{
	const char *file1 = "./test_l4lb.o";
	const char *file2 = "./test_l4lb_noinline.o";

	test_l4lb(file1);
	test_l4lb(file2);
}
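
/* Same load-balancer scenario as test_l4lb(), but using the XDP build
 * of the program with function inlining disabled; a return value of 1
 * rather than TC_ACT_REDIRECT (7) is expected here.
 */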
static void test_xdp_noinline(void)
{
	const char *file = "./test_xdp_noinline.o";
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	__u32 *magic = (__u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}
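
/* Load-only sanity check: test_tcp_estats.o must load as a tracepoint
 * program; nothing is attached or run here.
 */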
static void test_tcp_estats(void)
{
	const char *file = "./test_tcp_estats.o";
	int err, prog_fd;
	struct bpf_object *obj;
	__u32 duration = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	CHECK(err, "", "err %d errno %d\n", err, errno);
	if (err)
		return;	/* CHECK() above already counted the failure */

	bpf_object__close(obj);
}

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
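
/* Walk the prog/map id spaces: load two copies of test_obj_id.o,
 * validate bpf_obj_get_info_by_fd() output for each prog and map,
 * then confirm both ids are discoverable via bpf_prog_get_next_id()
 * and bpf_map_get_next_id().
 */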
static void test_bpf_obj_id(void)
{
	const __u64 array_magic_value = 0xfaceb00c;
	const __u32 array_key = 0;
	const int nr_iters = 2;
	const char *file = "./test_obj_id.o";
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	const char *expected_prog_name = "test_obj_id";
	const char *expected_map_name = "test_map_id";
	const __u64 nsec_per_sec = 1000000000;

	struct bpf_object *objs[nr_iters];
	int prog_fds[nr_iters], map_fds[nr_iters];
	/* +1 to test for the info_len returned by kernel */
	struct bpf_prog_info prog_infos[nr_iters + 1];
	struct bpf_map_info map_infos[nr_iters + 1];
	/* Each prog only uses one map. +1 to test nr_map_ids
	 * returned by kernel.
	 */
	__u32 map_ids[nr_iters + 1];
	char jited_insns[128], xlated_insns[128], zeros[128];
	__u32 i, next_id, info_len, nr_id_found, duration = 0;
	struct timespec real_time_ts, boot_time_ts;
	int sysctl_fd, jit_enabled = 0, err = 0;
	__u64 array_value;
	uid_t my_uid = getuid();
	time_t now, load_time;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			jit_enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	err = bpf_prog_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

	err = bpf_map_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

	for (i = 0; i < nr_iters; i++)
		objs[i] = NULL;

	/* Check bpf_obj_get_info_by_fd() */
	bzero(zeros, sizeof(zeros));
	for (i = 0; i < nr_iters; i++) {
		now = time(NULL);
		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
				    &objs[i], &prog_fds[i]);
		/* test_obj_id.o is a dumb prog. It should never fail
		 * to load.
		 */
		if (err)
			error_cnt++;
		assert(!err);

		/* Insert a magic value to the map */
		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
		assert(map_fds[i] >= 0);
		err = bpf_map_update_elem(map_fds[i], &array_key,
					  &array_magic_value, 0);
		assert(!err);

		/* Check getting map info */
		info_len = sizeof(struct bpf_map_info) * 2;
		bzero(&map_infos[i], info_len);
		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
					     &info_len);
		if (CHECK(err ||
			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
			  map_infos[i].key_size != sizeof(__u32) ||
			  map_infos[i].value_size != sizeof(__u64) ||
			  map_infos[i].max_entries != 1 ||
			  map_infos[i].map_flags != 0 ||
			  info_len != sizeof(struct bpf_map_info) ||
			  strcmp((char *)map_infos[i].name, expected_map_name),
			  "get-map-info(fd)",
			  "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
			  err, errno,
			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
			  info_len, sizeof(struct bpf_map_info),
			  map_infos[i].key_size,
			  map_infos[i].value_size,
			  map_infos[i].max_entries,
			  map_infos[i].map_flags,
			  map_infos[i].name, expected_map_name))
			goto done;

		/* Check getting prog info */
		info_len = sizeof(struct bpf_prog_info) * 2;
		bzero(&prog_infos[i], info_len);
		bzero(jited_insns, sizeof(jited_insns));
		bzero(xlated_insns, sizeof(xlated_insns));
		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
		prog_infos[i].jited_prog_len = sizeof(jited_insns);
		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
		prog_infos[i].nr_map_ids = 2;
		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
		assert(!err);
		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
		assert(!err);
		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
					     &info_len);
		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
			+ (prog_infos[i].load_time / nsec_per_sec);
		if (CHECK(err ||
			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
			  info_len != sizeof(struct bpf_prog_info) ||
			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
			  (jit_enabled &&
			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
			  !prog_infos[i].xlated_prog_len ||
			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
			  load_time < now - 60 || load_time > now + 60 ||
			  prog_infos[i].created_by_uid != my_uid ||
			  prog_infos[i].nr_map_ids != 1 ||
			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
			  strcmp((char *)prog_infos[i].name, expected_prog_name),
			  "get-prog-info(fd)",
			  "err %d errno %d i %d type %d(%d) info_len %u(%zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
			  err, errno, i,
			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
			  info_len, sizeof(struct bpf_prog_info),
			  jit_enabled,
			  prog_infos[i].jited_prog_len,
			  prog_infos[i].xlated_prog_len,
			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
			  load_time, now,
			  prog_infos[i].created_by_uid, my_uid,
			  prog_infos[i].nr_map_ids, 1,
			  *(int *)prog_infos[i].map_ids, map_infos[i].id,
			  prog_infos[i].name, expected_prog_name))
			goto done;
	}

	/* Check bpf_prog_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_prog_get_next_id(next_id, &next_id)) {
		struct bpf_prog_info prog_info = {};
		__u32 saved_map_id;
		int prog_fd;

		info_len = sizeof(prog_info);

		prog_fd = bpf_prog_get_fd_by_id(next_id);
		if (prog_fd < 0 && errno == ENOENT)
			/* The bpf_prog is in the dead row */
			continue;
		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
			  "prog_fd %d next_id %d errno %d\n",
			  prog_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (prog_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		/* Negative test:
		 * prog_info.nr_map_ids = 1
		 * prog_info.map_ids = NULL
		 */
		prog_info.nr_map_ids = 1;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		if (CHECK(!err || errno != EFAULT,
			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
			  err, errno, EFAULT))
			break;
		bzero(&prog_info, sizeof(prog_info));
		info_len = sizeof(prog_info);

		saved_map_id = *(int *)(prog_infos[i].map_ids);
		prog_info.map_ids = prog_infos[i].map_ids;
		prog_info.nr_map_ids = 2;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		prog_infos[i].jited_prog_insns = 0;
		prog_infos[i].xlated_prog_insns = 0;
		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
		      memcmp(&prog_info, &prog_infos[i], info_len) ||
		      *(int *)prog_info.map_ids != saved_map_id,
		      "get-prog-info(next_id->fd)",
		      "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
		      err, errno, info_len, sizeof(struct bpf_prog_info),
		      memcmp(&prog_info, &prog_infos[i], info_len),
		      *(int *)prog_info.map_ids, saved_map_id);
		close(prog_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total prog id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

	/* Check bpf_map_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_map_get_next_id(next_id, &next_id)) {
		struct bpf_map_info map_info = {};
		int map_fd;

		info_len = sizeof(map_info);

		map_fd = bpf_map_get_fd_by_id(next_id);
		if (map_fd < 0 && errno == ENOENT)
			/* The bpf_map is in the dead row */
			continue;
		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
			  "map_fd %d next_id %u errno %d\n",
			  map_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (map_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
		assert(!err);

		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
		      memcmp(&map_info, &map_infos[i], info_len) ||
		      array_value != array_magic_value,
		      "check get-map-info(next_id->fd)",
		      "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
		      err, errno, info_len, sizeof(struct bpf_map_info),
		      memcmp(&map_info, &map_infos[i], info_len),
		      array_value, array_magic_value);
		close(map_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total map id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

done:
	for (i = 0; i < nr_iters; i++)
		bpf_object__close(objs[i]);
}
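
/* Run test_pkt_md_access.o (SCHED_CLS) and expect a zero return,
 * i.e. the program's checks on __sk_buff metadata all passed.
 */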
static void test_pkt_md_access(void)
{
	const char *file = "./test_pkt_md_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	bpf_object__close(obj);
}
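
/* Validate kernel-side name checking for attr.prog_name and
 * attr.map_name: names of up to 15 characters plus the terminating NUL
 * are accepted; longer names or names containing characters like '\n'
 * must be rejected with EINVAL.
 */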
static void test_obj_name(void)
{
	struct {
		const char *name;
		int success;
		int expected_errno;
	} tests[] = {
		{ "", 1, 0 },
		{ "_123456789ABCDE", 1, 0 },
		{ "_123456789ABCDEF", 0, EINVAL },
		{ "_123456789ABCD\n", 0, EINVAL },
	};
	struct bpf_insn prog[] = {
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	__u32 duration = 0;
	int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		size_t name_len = strlen(tests[i].name) + 1;
		union bpf_attr attr;
		size_t ncopy;
		int fd;

		/* test different attr.prog_name during BPF_PROG_LOAD */
		ncopy = name_len < sizeof(attr.prog_name) ?
			name_len : sizeof(attr.prog_name);
		bzero(&attr, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
		attr.insn_cnt = 2;
		attr.insns = ptr_to_u64(prog);
		attr.license = ptr_to_u64("");
		memcpy(attr.prog_name, tests[i].name, ncopy);

		fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-prog-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);

		/* test different attr.map_name during BPF_MAP_CREATE */
		ncopy = name_len < sizeof(attr.map_name) ?
			name_len : sizeof(attr.map_name);
		bzero(&attr, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = 4;
		attr.value_size = 4;
		attr.max_entries = 1;
		attr.map_flags = 0;
		memcpy(attr.map_name, tests[i].name, ncopy);
		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-map-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);
	}
}
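
/* Attach several tracepoint programs to sched_switch and exercise
 * PERF_EVENT_IOC_QUERY_BPF: query before any program is attached,
 * count-only query with ids_len == 0, EFAULT on a bad pointer, ENOSPC
 * on a short id array, and full id readback after each attach.
 */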
static void test_tp_attach_query(void)
{
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;
	char buf[256];

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		return;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		return;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
				    &prog_fd[i]);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
			goto cleanup1;

		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
			  err, errno))
			goto cleanup1;
		saved_prog_ids[i] = prog_info.id;

		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu 0 */, -1 /* group id */,
				    0 /* flags */);
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
			  pmu_fd[i], errno))
			goto cleanup2;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 0) {
			/* check NULL prog array query */
			query->ids_len = num_progs;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 0,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 1) {
			/* try to get # of programs only */
			query->ids_len = 0;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;

			/* try a few negative tests */
			/* invalid query pointer */
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
				    (struct perf_event_query_bpf *)0x1);
			if (CHECK(!err || errno != EFAULT,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d\n", err, errno))
				goto cleanup3;

			/* not enough space */
			query->ids_len = 1;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
			goto cleanup3;
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))
				goto cleanup3;
	}

	i = num_progs - 1;
	for (; i >= 0; i--) {
 cleanup3:
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
 cleanup2:
		close(pmu_fd[i]);
 cleanup1:
		bpf_object__close(obj[i]);
	}
	free(query);
}
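
/* Return 0 iff every key in map1 also exists in map2. Callers invoke
 * it in both directions to prove the two key sets are identical.
 */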
static int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}
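
/* Attach test_stacktrace_map.o to the sched_switch tracepoint via a
 * perf event, let it collect stack ids for a second, then verify that
 * stackid_hmap and stackmap hold exactly the same set of keys.
 */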
static void test_stacktrace_map(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int bytes, efd, err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {};
	__u32 key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;

	/* Get the ID for the sched/sched_switch tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (bytes <= 0 || bytes >= sizeof(buf))
		goto close_prog;

	/* Open the perf event and attach bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (err)
		goto disable_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (err)
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
		goto disable_pmu;

	/* give some time for bpf program run */
	sleep(1);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	goto disable_pmu_noerr;
disable_pmu:
	error_cnt++;
disable_pmu_noerr:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
	close(pmu_fd);
close_prog:
	bpf_object__close(obj);
}
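
/* Same stackmap consistency check as test_stacktrace_map(), but the
 * program is attached through the raw tracepoint API instead of a
 * perf event.
 */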
static void test_stacktrace_map_raw_tp(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int efd, err, prog_fd;
	__u32 key, val, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
		goto close_prog;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
		goto close_prog;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
		goto close_prog;

	/* give some time for bpf program run */
	sleep(1);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto close_prog;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto close_prog;
	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}
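
/* Grab the hex build id of ./urandom_read by shelling out to readelf;
 * the first "Build ID" line is copied (truncated to fit) into
 * 'build_id'.
 */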
static int extract_build_id(char *build_id, size_t size)
{
	FILE *fp;
	char *line = NULL;
	size_t len = 0;

	fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
	if (fp == NULL)
		return -1;

	if (getline(&line, &len, fp) == -1)
		goto err;
	pclose(fp);	/* streams from popen() need pclose(), not fclose() */

	if (len >= size)	/* leave room for the terminating NUL */
		len = size - 1;
	memcpy(build_id, line, len);
	build_id[len] = '\0';
	free(line);
	return 0;
err:
	pclose(fp);
	return -1;
}
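
/* Attach test_stacktrace_build_id.o to the random/urandom_read
 * tracepoint, trigger it via ./urandom_read, check stackid_hmap and
 * stackmap consistency, and finally verify that at least one stored
 * stack frame carries the build id that readelf reports for the
 * binary.
 */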
static void test_stacktrace_build_id(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int bytes, efd, err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		goto out;

	/* Get the ID for the random/urandom_read tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

	/* Open the perf event and attach bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0);
	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = extract_build_id(buf, 256);
	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto disable_pmu;

	do {
		char build_id[64];

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto disable_pmu;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	CHECK(build_id_matches < 1, "build id match",
	      "Didn't find expected build ID from the map\n");

disable_pmu:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

close_pmu:
	close(pmu_fd);

close_prog:
	bpf_object__close(obj);

out:
	return;
}
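
/* Run every test in sequence and report the aggregate counts. */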
int main(void)
{
	test_pkt_access();
	test_xdp();
	test_xdp_adjust_tail();
	test_l4lb_all();
	test_xdp_noinline();
	test_tcp_estats();
	test_bpf_obj_id();
	test_pkt_md_access();
	test_obj_name();
	test_tp_attach_query();
	test_stacktrace_map();
	test_stacktrace_build_id();
	test_stacktrace_map_raw_tp();

	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}