/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>

#include <linux/types.h>
typedef __u16 __sum16;
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/unistd.h>

#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <fcntl.h>

#include <linux/bpf.h>
#include <linux/err.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "test_iptunnel_common.h"
#include "bpf_util.h"
#include "bpf_endian.h"
#include "bpf_rlimit.h"
#include "trace_helpers.h"

static int error_cnt, pass_cnt;
static bool jit_enabled;

#define MAGIC_BYTES 123

/* ipv4 test vector */
static struct {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed pkt_v4 = {
	.eth.h_proto = bpf_htons(ETH_P_IP),
	.iph.ihl = 5,
	.iph.protocol = 6,
	.iph.tot_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

/* ipv6 test vector */
static struct {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed pkt_v6 = {
	.eth.h_proto = bpf_htons(ETH_P_IPV6),
	.iph.nexthdr = 6,
	.iph.payload_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

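/* CHECK() treats 'condition' as a failure predicate: on failure it bumps
 * error_cnt and prints the formatted message, on success it bumps pass_cnt.
 * It expects a __u32 'duration' variable to be in scope in the caller.
 */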
#define CHECK(condition, tag, format...) ({				\
	int __ret = !!(condition);					\
	if (__ret) {							\
		error_cnt++;						\
		printf("%s:FAIL:%s ", __func__, tag);			\
		printf(format);						\
	} else {							\
		pass_cnt++;						\
		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
	}								\
	__ret;								\
})

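/* Find a map by name in a loaded bpf_object; return its fd, or -1 (and
 * count an error) if the map is not present.
 */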
static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		printf("%s:FAIL:map '%s' not found\n", test, name);
		error_cnt++;
		return -1;
	}
	return bpf_map__fd(map);
}

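/* Run test_pkt_access.o via BPF_PROG_TEST_RUN against the canned IPv4 and
 * IPv6 packets; the program is expected to return 0 for both.
 */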
static void test_pkt_access(void)
{
	const char *file = "./test_pkt_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "ipv4",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "ipv6",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);
	bpf_object__close(obj);
}

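/* test_xdp.o tunnels the packet according to the vip2tnl map: expect
 * XDP_TX and an outer IPIP (IPv4) or IPv6-in-IPv6 header on the output.
 */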
static void test_xdp(void)
{
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct vip key6 = {.protocol = 6, .family = AF_INET6};
	struct iptnl_info value4 = {.family = AF_INET};
	struct iptnl_info value6 = {.family = AF_INET6};
	const char *file = "./test_xdp.o";
	struct bpf_object *obj;
	char buf[128];
	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	__u32 duration, retval, size;
	int err, prog_fd, map_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key4, &value4, 0);
	bpf_map_update_elem(map_fd, &key6, &value6, 0);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || retval != XDP_TX || size != 74 ||
	      iph->protocol != IPPROTO_IPIP, "ipv4",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || retval != XDP_TX || size != 114 ||
	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
out:
	bpf_object__close(obj);
}

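/* test_adjust_tail.o exercises bpf_xdp_adjust_tail(): the IPv4 packet is
 * expected to be dropped, the IPv6 packet shrunk to 54 bytes and XDP_TX'ed.
 */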
static void test_xdp_adjust_tail(void)
{
	const char *file = "./test_adjust_tail.o";
	struct bpf_object *obj;
	char buf[128];
	__u32 duration, retval, size;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || retval != XDP_DROP,
	      "ipv4", "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || retval != XDP_TX || size != 54,
	      "ipv6", "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
	bpf_object__close(obj);
}

#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
#define VIP_NUM 5

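/* Load an L4 load-balancer program, seed its vip_map/ch_rings/reals maps,
 * run it over the IPv4 and IPv6 test packets and verify the redirect
 * verdict, the encapsulated size/magic and the per-CPU stats map.
 */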
static void test_l4lb(const char *file)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	u32 *magic = (u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}

static void test_l4lb_all(void)
{
	const char *file1 = "./test_l4lb.o";
	const char *file2 = "./test_l4lb_noinline.o";

	test_l4lb(file1);
	test_l4lb(file2);
}

static void test_xdp_noinline(void)
{
	const char *file = "./test_xdp_noinline.o";
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	u32 *magic = (u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 1 || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 1 || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}

static void test_tcp_estats(void)
{
	const char *file = "./test_tcp_estats.o";
	int err, prog_fd;
	struct bpf_object *obj;
	__u32 duration = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	CHECK(err, "", "err %d errno %d\n", err, errno);
	if (err) {
		error_cnt++;
		return;
	}

	bpf_object__close(obj);
}

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

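/* Report whether the BPF JIT is enabled by reading the first character of
 * /proc/sys/net/core/bpf_jit_enable.
 */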
static bool is_jit_enabled(void)
{
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	bool enabled = false;
	int sysctl_fd;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	return enabled;
}

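/* Exercise prog/map ID introspection: bpf_obj_get_info_by_fd(),
 * bpf_prog_get_next_id()/bpf_map_get_next_id() and the *_get_fd_by_id()
 * calls, including negative tests with non-existent IDs and a bad
 * nr_map_ids/map_ids combination.
 */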
static void test_bpf_obj_id(void)
{
	const __u64 array_magic_value = 0xfaceb00c;
	const __u32 array_key = 0;
	const int nr_iters = 2;
	const char *file = "./test_obj_id.o";
	const char *expected_prog_name = "test_obj_id";
	const char *expected_map_name = "test_map_id";
	const __u64 nsec_per_sec = 1000000000;

	struct bpf_object *objs[nr_iters];
	int prog_fds[nr_iters], map_fds[nr_iters];
	/* +1 to test for the info_len returned by kernel */
	struct bpf_prog_info prog_infos[nr_iters + 1];
	struct bpf_map_info map_infos[nr_iters + 1];
	/* Each prog only uses one map. +1 to test nr_map_ids
	 * returned by kernel.
	 */
	__u32 map_ids[nr_iters + 1];
	char jited_insns[128], xlated_insns[128], zeros[128];
	__u32 i, next_id, info_len, nr_id_found, duration = 0;
	struct timespec real_time_ts, boot_time_ts;
	int err = 0;
	__u64 array_value;
	uid_t my_uid = getuid();
	time_t now, load_time;

	err = bpf_prog_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

	err = bpf_map_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

	for (i = 0; i < nr_iters; i++)
		objs[i] = NULL;

	/* Check bpf_obj_get_info_by_fd() */
	bzero(zeros, sizeof(zeros));
	for (i = 0; i < nr_iters; i++) {
		now = time(NULL);
		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
				    &objs[i], &prog_fds[i]);
		/* test_obj_id.o is a dumb prog. It should never fail
		 * to load.
		 */
		if (err)
			error_cnt++;
		assert(!err);

		/* Insert a magic value to the map */
		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
		assert(map_fds[i] >= 0);
		err = bpf_map_update_elem(map_fds[i], &array_key,
					  &array_magic_value, 0);
		assert(!err);

		/* Check getting map info */
		info_len = sizeof(struct bpf_map_info) * 2;
		bzero(&map_infos[i], info_len);
		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
					     &info_len);
		if (CHECK(err ||
			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
			  map_infos[i].key_size != sizeof(__u32) ||
			  map_infos[i].value_size != sizeof(__u64) ||
			  map_infos[i].max_entries != 1 ||
			  map_infos[i].map_flags != 0 ||
			  info_len != sizeof(struct bpf_map_info) ||
			  strcmp((char *)map_infos[i].name, expected_map_name),
			  "get-map-info(fd)",
			  "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
			  err, errno,
			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
			  info_len, sizeof(struct bpf_map_info),
			  map_infos[i].key_size,
			  map_infos[i].value_size,
			  map_infos[i].max_entries,
			  map_infos[i].map_flags,
			  map_infos[i].name, expected_map_name))
			goto done;

		/* Check getting prog info */
		info_len = sizeof(struct bpf_prog_info) * 2;
		bzero(&prog_infos[i], info_len);
		bzero(jited_insns, sizeof(jited_insns));
		bzero(xlated_insns, sizeof(xlated_insns));
		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
		prog_infos[i].jited_prog_len = sizeof(jited_insns);
		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
		prog_infos[i].nr_map_ids = 2;
		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
		assert(!err);
		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
		assert(!err);
		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
					     &info_len);
		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
			+ (prog_infos[i].load_time / nsec_per_sec);
		if (CHECK(err ||
			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
			  info_len != sizeof(struct bpf_prog_info) ||
			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
			  (jit_enabled &&
			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
			  !prog_infos[i].xlated_prog_len ||
			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
			  load_time < now - 60 || load_time > now + 60 ||
			  prog_infos[i].created_by_uid != my_uid ||
			  prog_infos[i].nr_map_ids != 1 ||
			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
			  strcmp((char *)prog_infos[i].name, expected_prog_name),
			  "get-prog-info(fd)",
			  "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
			  err, errno, i,
			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
			  info_len, sizeof(struct bpf_prog_info),
			  jit_enabled,
			  prog_infos[i].jited_prog_len,
			  prog_infos[i].xlated_prog_len,
			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
			  load_time, now,
			  prog_infos[i].created_by_uid, my_uid,
			  prog_infos[i].nr_map_ids, 1,
			  *(int *)prog_infos[i].map_ids, map_infos[i].id,
			  prog_infos[i].name, expected_prog_name))
			goto done;
	}

	/* Check bpf_prog_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_prog_get_next_id(next_id, &next_id)) {
		struct bpf_prog_info prog_info = {};
		__u32 saved_map_id;
		int prog_fd;

		info_len = sizeof(prog_info);

		prog_fd = bpf_prog_get_fd_by_id(next_id);
		if (prog_fd < 0 && errno == ENOENT)
			/* The bpf_prog is in the dead row */
			continue;
		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
			  "prog_fd %d next_id %d errno %d\n",
			  prog_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (prog_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		/* Negative test:
		 * prog_info.nr_map_ids = 1
		 * prog_info.map_ids = NULL
		 */
		prog_info.nr_map_ids = 1;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		if (CHECK(!err || errno != EFAULT,
			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
			  err, errno, EFAULT))
			break;
		bzero(&prog_info, sizeof(prog_info));
		info_len = sizeof(prog_info);

		saved_map_id = *(int *)(prog_infos[i].map_ids);
		prog_info.map_ids = prog_infos[i].map_ids;
		prog_info.nr_map_ids = 2;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		prog_infos[i].jited_prog_insns = 0;
		prog_infos[i].xlated_prog_insns = 0;
		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
		      memcmp(&prog_info, &prog_infos[i], info_len) ||
		      *(int *)prog_info.map_ids != saved_map_id,
		      "get-prog-info(next_id->fd)",
		      "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
		      err, errno, info_len, sizeof(struct bpf_prog_info),
		      memcmp(&prog_info, &prog_infos[i], info_len),
		      *(int *)prog_info.map_ids, saved_map_id);
		close(prog_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total prog id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

	/* Check bpf_map_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_map_get_next_id(next_id, &next_id)) {
		struct bpf_map_info map_info = {};
		int map_fd;

		info_len = sizeof(map_info);

		map_fd = bpf_map_get_fd_by_id(next_id);
		if (map_fd < 0 && errno == ENOENT)
			/* The bpf_map is in the dead row */
			continue;
		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
			  "map_fd %d next_id %u errno %d\n",
			  map_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (map_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
		assert(!err);

		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
		      memcmp(&map_info, &map_infos[i], info_len) ||
		      array_value != array_magic_value,
		      "check get-map-info(next_id->fd)",
		      "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
		      err, errno, info_len, sizeof(struct bpf_map_info),
		      memcmp(&map_info, &map_infos[i], info_len),
		      array_value, array_magic_value);

		close(map_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total map id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

done:
	for (i = 0; i < nr_iters; i++)
		bpf_object__close(objs[i]);
}

static void test_pkt_md_access(void)
{
	const char *file = "./test_pkt_md_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	bpf_object__close(obj);
}

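/* Check kernel validation of attr.prog_name and attr.map_name during
 * BPF_PROG_LOAD and BPF_MAP_CREATE: names that are too long or contain
 * invalid characters must be rejected with EINVAL.
 */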
static void test_obj_name(void)
{
	struct {
		const char *name;
		int success;
		int expected_errno;
	} tests[] = {
		{ "", 1, 0 },
		{ "_123456789ABCDE", 1, 0 },
		{ "_123456789ABCDEF", 0, EINVAL },
		{ "_123456789ABCD\n", 0, EINVAL },
	};
	struct bpf_insn prog[] = {
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	__u32 duration = 0;
	int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		size_t name_len = strlen(tests[i].name) + 1;
		union bpf_attr attr;
		size_t ncopy;
		int fd;

		/* test different attr.prog_name during BPF_PROG_LOAD */
		ncopy = name_len < sizeof(attr.prog_name) ?
			name_len : sizeof(attr.prog_name);
		bzero(&attr, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
		attr.insn_cnt = 2;
		attr.insns = ptr_to_u64(prog);
		attr.license = ptr_to_u64("");
		memcpy(attr.prog_name, tests[i].name, ncopy);

		fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-prog-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);

		/* test different attr.map_name during BPF_MAP_CREATE */
		ncopy = name_len < sizeof(attr.map_name) ?
			name_len : sizeof(attr.map_name);
		bzero(&attr, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = 4;
		attr.value_size = 4;
		attr.max_entries = 1;
		attr.map_flags = 0;
		memcpy(attr.map_name, tests[i].name, ncopy);

		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-map-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);
	}
}

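/* Attach several tracepoint programs to one perf event and verify that
 * PERF_EVENT_IOC_QUERY_BPF reports the right program count and IDs,
 * including the EFAULT and ENOSPC error paths.
 */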
static void test_tp_attach_query(void)
{
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;
	char buf[256];

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		return;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		return;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
				    &prog_fd[i]);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
			goto cleanup1;

		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
			  err, errno))
			goto cleanup1;
		saved_prog_ids[i] = prog_info.id;

		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu 0 */, -1 /* group id */,
				    0 /* flags */);
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
			  pmu_fd[i], errno))
			goto cleanup2;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 0) {
			/* check NULL prog array query */
			query->ids_len = num_progs;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 0,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 1) {
			/* try to get # of programs only */
			query->ids_len = 0;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;

			/* try a few negative tests */
			/* invalid query pointer */
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
				    (struct perf_event_query_bpf *)0x1);
			if (CHECK(!err || errno != EFAULT,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d\n", err, errno))
				goto cleanup3;

			/* not enough space */
			query->ids_len = 1;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
			goto cleanup3;
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))
				goto cleanup3;
	}

	i = num_progs - 1;
	for (; i >= 0; i--) {
 cleanup3:
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
 cleanup2:
		close(pmu_fd[i]);
 cleanup1:
		bpf_object__close(obj[i]);
	}
	free(query);
}

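/* Verify that every key present in map1 can also be looked up in map2. */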
static int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}

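/* Walk all keys of the stack map and check that the stack stored in the
 * corresponding slot of the array map is byte-for-byte identical.
 */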
static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;
	int i, err = 0;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	cur_key_p = NULL;
	next_key_p = &key;
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		if (err)
			goto out;
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		if (err)
			goto out;
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
				err = -1;
				goto out;
			}
		}
		key = *next_key_p;
		cur_key_p = &key;
		next_key_p = &next_key;
	}
	if (errno != ENOENT)
		err = -1;

out:
	free(val_buf1);
	free(val_buf2);
	return err;
}

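/* Attach test_stacktrace_map.o to the sched:sched_switch tracepoint and
 * cross-check the stackid hash map, the stack map and the stack array map.
 */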
static void test_stacktrace_map()
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_map.o";
	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
	struct perf_event_attr attr = {};
	__u32 key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;

	/* Get the ID for the sched/sched_switch tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (bytes <= 0 || bytes >= sizeof(buf))
		goto close_prog;

	/* Open the perf event and attach bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (err)
		goto disable_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (err)
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (stack_amap_fd < 0)
		goto disable_pmu;

	/* give some time for bpf program run */
	sleep(1);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
	if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	goto disable_pmu_noerr;
disable_pmu:
	error_cnt++;
disable_pmu_noerr:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
	close(pmu_fd);
close_prog:
	bpf_object__close(obj);
}

static void test_stacktrace_map_raw_tp()
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int efd, err, prog_fd;
	__u32 key, val, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
		goto close_prog;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
		goto close_prog;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
		goto close_prog;

	/* give some time for bpf program run */
	sleep(1);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto close_prog;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

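/* Read the build ID of ./urandom_read by running readelf and return the
 * matching "Build ID" line in 'build_id'.
 */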
static int extract_build_id(char *build_id, size_t size)
{
	FILE *fp;
	char *line = NULL;
	size_t len = 0;

	fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
	if (fp == NULL)
		return -1;

	if (getline(&line, &len, fp) == -1)
		goto err;
	/* a stream opened with popen() must be closed with pclose() */
	pclose(fp);

	if (len > size)
		len = size;
	memcpy(build_id, line, len);
	build_id[len] = '\0';
	return 0;
err:
	pclose(fp);
	return -1;
}

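/* Attach to the random:urandom_read tracepoint and verify that the stack
 * map entries (build ID + offset records) contain the build ID that
 * readelf reports for ./urandom_read.
 */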
static void test_stacktrace_build_id(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
	struct perf_event_attr attr = {};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		goto out;

	/* Get the ID for the random/urandom_read tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

	/* Open the perf event and attach bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("./urandom_read") == 0);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = extract_build_id(buf, 256);
	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto disable_pmu;

	do {
		char build_id[64];

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto disable_pmu;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto disable_pmu;

	stack_trace_len = PERF_MAX_STACK_DEPTH
		* sizeof(struct bpf_stack_build_id);
	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
	CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
	      "err %d errno %d\n", err, errno);

disable_pmu:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

close_pmu:
	close(pmu_fd);

close_prog:
	bpf_object__close(obj);

out:
	return;
}

static void test_stacktrace_build_id_nmi(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {
		.sample_freq = 5000,
		.freq = 1,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;

	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open",
		  "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("taskset 0x1 ./urandom_read 100000") == 0);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = extract_build_id(buf, 256);
	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto disable_pmu;

	do {
		char build_id[64];

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto disable_pmu;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto disable_pmu;

	/*
	 * We intentionally skip compare_stack_ips(). This is because we
	 * only support one in_nmi() ips-to-build_id translation per cpu
	 * at any time, thus stack_amap here will always fall back to
	 * BPF_STACK_BUILD_ID_IP;
	 */

disable_pmu:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

close_pmu:
	close(pmu_fd);

close_prog:
	bpf_object__close(obj);
}

#define MAX_CNT_RAWTP	10ull
#define MAX_STACK_RAWTP	100

struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

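/* Callback for the perf event poller: sanity-check the kernel and user
 * stacks delivered by test_get_stack_rawtp.o. With the JIT enabled any
 * non-empty kernel stack is accepted; otherwise ___bpf_prog_run must be
 * found among the kernel IPs.
 */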
static int get_stack_print_output(void *data, int size)
{
	bool good_kern_stack = false, good_user_stack = false;
	const char *nonjit_func = "___bpf_prog_run";
	struct get_stack_trace_t *e = data;
	int i, num_stack;
	static __u64 cnt;
	struct ksym *ks;

	cnt++;

	if (size < sizeof(struct get_stack_trace_t)) {
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
		/* If jit is enabled, we do not have a good way to
		 * verify the sanity of the kernel stack. So we
		 * just assume it is good if the stack is not empty.
		 * This could be improved in the future.
		 */
		if (jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
		num_stack = e->kern_stack_size / sizeof(__u64);
		if (jit_enabled) {
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e->kern_stack[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
			good_user_stack = true;
	}
	if (!good_kern_stack || !good_user_stack)
		return LIBBPF_PERF_EVENT_ERROR;

	if (cnt == MAX_CNT_RAWTP)
		return LIBBPF_PERF_EVENT_DONE;

	return LIBBPF_PERF_EVENT_CONT;
}

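/* Attach test_get_stack_rawtp.o to the sys_enter raw tracepoint, trigger
 * some syscalls and validate the stack samples via the perf event buffer.
 */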
static void test_get_stack_raw_tp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
	struct perf_event_attr attr = {};
	struct timespec tv = {0, 10};
	__u32 key = 0, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
	if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  perfmap_fd, errno))
		goto close_prog;

	err = load_kallsyms();
	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
		goto close_prog;

	attr.sample_type = PERF_SAMPLE_RAW;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
			 -1/*group_fd*/, 0);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_prog;

	err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
	if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
		  errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
		  err, errno))
		goto close_prog;

	err = perf_event_mmap(pmu_fd);
	if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
		goto close_prog;

	/* trigger some syscall action */
	for (i = 0; i < MAX_CNT_RAWTP; i++)
		nanosleep(&tv, NULL);

	err = perf_event_poller(pmu_fd, get_stack_print_output);
	if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

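/* Use bpf_task_fd_query() on a raw tracepoint fd and check the reported
 * name and type, plus the truncation behaviour for zero-length, NULL and
 * too-small buffers.
 */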
static void test_task_fd_query_rawtp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	__u64 probe_offset, probe_addr;
	__u32 len, prog_id, fd_type;
	struct bpf_object *obj;
	int efd, err, prog_fd;
	__u32 duration = 0;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	/* query (getpid(), efd) */
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
		  errno))
		goto close_prog;

	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      strcmp(buf, "sys_enter") == 0;
	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
		  fd_type, buf))
		goto close_prog;

	/* test zero len */
	len = 0;
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
		  err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter");
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;

	/* test empty buffer */
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
		  err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter");
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;

	/* test smaller buffer */
	len = 3;
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
		  "err %d errno %d\n", err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter") &&
	      strcmp(buf, "sy") == 0;
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}
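
/*
 * Attach test_tracepoint.o to the tracepoint named by @probe_name through
 * perf_event_open() + PERF_EVENT_IOC_SET_BPF, then verify that
 * BPF_TASK_FD_QUERY on the perf event fd reports BPF_FD_TYPE_TRACEPOINT
 * and the expected @tp_name.
 */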
static void test_task_fd_query_tp_core(const char *probe_name,
				       const char *tp_name)
{
	const char *file = "./test_tracepoint.o";
	int err, bytes, efd, prog_fd, pmu_fd;
	struct perf_event_attr attr = {};
	__u64 probe_offset, probe_addr;
	__u32 len, prog_id, fd_type;
	struct bpf_object *obj;
	__u32 duration = 0;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
		goto close_prog;

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
		  "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	/* query (getpid(), pmu_fd) */
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
		  fd_type, buf))
		goto close_pmu;

	close(pmu_fd);
	goto close_prog_noerr;
close_pmu:
	close(pmu_fd);
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}
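
/* Run the fd query test against one scheduler and one syscall tracepoint. */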
static void test_task_fd_query_tp(void)
{
	test_task_fd_query_tp_core("sched/sched_switch",
				   "sched_switch");
	test_task_fd_query_tp_core("syscalls/sys_enter_read",
				   "sys_enter_read");
}
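
/*
 * Load every program in test_sk_lookup_kern.o as SCHED_CLS. Programs whose
 * section name contains "fail" are expected to be rejected by the verifier;
 * all other programs must load successfully.
 */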
static void test_reference_tracking(void)
{
	const char *file = "./test_sk_lookup_kern.o";
	struct bpf_object *obj;
	struct bpf_program *prog;
	__u32 duration = 0;
	int err = 0;

	obj = bpf_object__open(file);
	if (IS_ERR(obj)) {
		error_cnt++;
		return;
	}

	bpf_object__for_each_program(prog, obj) {
		const char *title;

		/* Ignore .text sections */
		title = bpf_program__title(prog, false);
		if (strstr(title, ".text") != NULL)
			continue;

		bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);

		/* Expect verifier failure if test name has 'fail' */
		if (strstr(title, "fail") != NULL) {
			libbpf_set_print(NULL, NULL, NULL);
			err = !bpf_program__load(prog, "GPL", 0);
			libbpf_set_print(printf, printf, NULL);
		} else {
			err = bpf_program__load(prog, "GPL", 0);
		}
		CHECK(err, title, "\n");
	}
	bpf_object__close(obj);
}
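
/* Map flavors exercised by test_queue_stack_map(). */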
enum {
	QUEUE,
	STACK,
};
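
/*
 * Push 32 random values into "map_in" from user space, run the program once
 * per element and check that each popped value lands in iph.daddr, that an
 * empty map makes the program return TC_ACT_SHOT, and that the values the
 * program pushed into "map_out" (iph.saddr) can be read back in order.
 */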
static void test_queue_stack_map(int type)
{
	const int MAP_SIZE = 32;
	__u32 vals[MAP_SIZE], duration, retval, size, val;
	int i, err, prog_fd, map_in_fd, map_out_fd;
	char file[32], buf[128];
	struct bpf_object *obj;
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);

	/* Fill test values to be used */
	for (i = 0; i < MAP_SIZE; i++)
		vals[i] = rand();

	if (type == QUEUE)
		strncpy(file, "./test_queue_map.o", sizeof(file));
	else if (type == STACK)
		strncpy(file, "./test_stack_map.o", sizeof(file));
	else
		return;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_in_fd = bpf_find_map(__func__, obj, "map_in");
	if (map_in_fd < 0)
		goto out;

	map_out_fd = bpf_find_map(__func__, obj, "map_out");
	if (map_out_fd < 0)
		goto out;

	/* Push 32 elements to the input map */
	for (i = 0; i < MAP_SIZE; i++) {
		err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
		if (err) {
			error_cnt++;
			goto out;
		}
	}

	/* The eBPF program pushes iph.saddr in the output map,
	 * pops the input map and saves this value in iph.daddr
	 */
	for (i = 0; i < MAP_SIZE; i++) {
		if (type == QUEUE) {
			val = vals[i];
			pkt_v4.iph.saddr = vals[i] * 5;
		} else if (type == STACK) {
			val = vals[MAP_SIZE - 1 - i];
			pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
		}

		err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
					buf, &size, &retval, &duration);
		if (err || retval || size != sizeof(pkt_v4) ||
		    iph->daddr != val)
			break;
	}

	CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val,
	      "bpf_map_pop_elem",
	      "err %d errno %d retval %d size %d iph->daddr %u\n",
	      err, errno, retval, size, iph->daddr);

	/* Queue is empty, program should return TC_ACT_SHOT */
	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 2 /* TC_ACT_SHOT */ || size != sizeof(pkt_v4),
	      "check-queue-stack-map-empty",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	/* Check that the program pushed elements correctly */
	for (i = 0; i < MAP_SIZE; i++) {
		err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
		if (err || val != vals[i] * 5)
			break;
	}

	CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
	      "bpf_map_push_elem", "err %d value %u\n", err, val);

out:
	pkt_v4.iph.saddr = 0;
	bpf_object__close(obj);
}
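
/* Seed the RNG, record the JIT state, run every test, then print a summary. */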
int main(void)
{
	srand(time(NULL));

	jit_enabled = is_jit_enabled();

	test_pkt_access();
	test_xdp();
	test_xdp_adjust_tail();
	test_l4lb_all();
	test_xdp_noinline();
	test_tcp_estats();
	test_bpf_obj_id();
	test_pkt_md_access();
	test_obj_name();
	test_tp_attach_query();
	test_stacktrace_map();
	test_stacktrace_build_id();
	test_stacktrace_build_id_nmi();
	test_stacktrace_map_raw_tp();
	test_get_stack_raw_tp();
	test_task_fd_query_rawtp();
	test_task_fd_query_tp();
	test_reference_tracking();
	test_queue_stack_map(QUEUE);
	test_queue_stack_map(STACK);

	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}