/* test_verifier.c */
  1. /*
  2. * Testsuite for eBPF verifier
  3. *
  4. * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. */
  10. #include <stdio.h>
  11. #include <unistd.h>
  12. #include <linux/bpf.h>
  13. #include <errno.h>
  14. #include <linux/unistd.h>
  15. #include <string.h>
  16. #include <linux/filter.h>
  17. #include <stddef.h>
  18. #include "libbpf.h"
  19. #define MAX_INSNS 512
  20. #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
  21. struct bpf_test {
  22. const char *descr;
  23. struct bpf_insn insns[MAX_INSNS];
  24. int fixup[32];
  25. const char *errstr;
  26. enum {
  27. ACCEPT,
  28. REJECT
  29. } result;
  30. enum bpf_prog_type prog_type;
  31. };
  32. static struct bpf_test tests[] = {
  33. {
  34. "add+sub+mul",
  35. .insns = {
  36. BPF_MOV64_IMM(BPF_REG_1, 1),
  37. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
  38. BPF_MOV64_IMM(BPF_REG_2, 3),
  39. BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
  40. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
  41. BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
  42. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  43. BPF_EXIT_INSN(),
  44. },
  45. .result = ACCEPT,
  46. },
  47. {
  48. "unreachable",
  49. .insns = {
  50. BPF_EXIT_INSN(),
  51. BPF_EXIT_INSN(),
  52. },
  53. .errstr = "unreachable",
  54. .result = REJECT,
  55. },
  56. {
  57. "unreachable2",
  58. .insns = {
  59. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  60. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  61. BPF_EXIT_INSN(),
  62. },
  63. .errstr = "unreachable",
  64. .result = REJECT,
  65. },
  66. {
  67. "out of range jump",
  68. .insns = {
  69. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  70. BPF_EXIT_INSN(),
  71. },
  72. .errstr = "jump out of range",
  73. .result = REJECT,
  74. },
  75. {
  76. "out of range jump2",
  77. .insns = {
  78. BPF_JMP_IMM(BPF_JA, 0, 0, -2),
  79. BPF_EXIT_INSN(),
  80. },
  81. .errstr = "jump out of range",
  82. .result = REJECT,
  83. },
  84. {
  85. "test1 ld_imm64",
  86. .insns = {
  87. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  88. BPF_LD_IMM64(BPF_REG_0, 0),
  89. BPF_LD_IMM64(BPF_REG_0, 0),
  90. BPF_LD_IMM64(BPF_REG_0, 1),
  91. BPF_LD_IMM64(BPF_REG_0, 1),
  92. BPF_MOV64_IMM(BPF_REG_0, 2),
  93. BPF_EXIT_INSN(),
  94. },
  95. .errstr = "invalid BPF_LD_IMM insn",
  96. .result = REJECT,
  97. },
  98. {
  99. "test2 ld_imm64",
  100. .insns = {
  101. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  102. BPF_LD_IMM64(BPF_REG_0, 0),
  103. BPF_LD_IMM64(BPF_REG_0, 0),
  104. BPF_LD_IMM64(BPF_REG_0, 1),
  105. BPF_LD_IMM64(BPF_REG_0, 1),
  106. BPF_EXIT_INSN(),
  107. },
  108. .errstr = "invalid BPF_LD_IMM insn",
  109. .result = REJECT,
  110. },
  111. {
  112. "test3 ld_imm64",
  113. .insns = {
  114. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  115. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  116. BPF_LD_IMM64(BPF_REG_0, 0),
  117. BPF_LD_IMM64(BPF_REG_0, 0),
  118. BPF_LD_IMM64(BPF_REG_0, 1),
  119. BPF_LD_IMM64(BPF_REG_0, 1),
  120. BPF_EXIT_INSN(),
  121. },
  122. .errstr = "invalid bpf_ld_imm64 insn",
  123. .result = REJECT,
  124. },
  125. {
  126. "test4 ld_imm64",
  127. .insns = {
  128. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  129. BPF_EXIT_INSN(),
  130. },
  131. .errstr = "invalid bpf_ld_imm64 insn",
  132. .result = REJECT,
  133. },
  134. {
  135. "test5 ld_imm64",
  136. .insns = {
  137. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  138. },
  139. .errstr = "invalid bpf_ld_imm64 insn",
  140. .result = REJECT,
  141. },
  142. {
  143. "no bpf_exit",
  144. .insns = {
  145. BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
  146. },
  147. .errstr = "jump out of range",
  148. .result = REJECT,
  149. },
  150. {
  151. "loop (back-edge)",
  152. .insns = {
  153. BPF_JMP_IMM(BPF_JA, 0, 0, -1),
  154. BPF_EXIT_INSN(),
  155. },
  156. .errstr = "back-edge",
  157. .result = REJECT,
  158. },
  159. {
  160. "loop2 (back-edge)",
  161. .insns = {
  162. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  163. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  164. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  165. BPF_JMP_IMM(BPF_JA, 0, 0, -4),
  166. BPF_EXIT_INSN(),
  167. },
  168. .errstr = "back-edge",
  169. .result = REJECT,
  170. },
  171. {
  172. "conditional loop",
  173. .insns = {
  174. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  175. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  176. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  177. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
  178. BPF_EXIT_INSN(),
  179. },
  180. .errstr = "back-edge",
  181. .result = REJECT,
  182. },
  183. {
  184. "read uninitialized register",
  185. .insns = {
  186. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  187. BPF_EXIT_INSN(),
  188. },
  189. .errstr = "R2 !read_ok",
  190. .result = REJECT,
  191. },
  192. {
  193. "read invalid register",
  194. .insns = {
  195. BPF_MOV64_REG(BPF_REG_0, -1),
  196. BPF_EXIT_INSN(),
  197. },
  198. .errstr = "R15 is invalid",
  199. .result = REJECT,
  200. },
  201. {
  202. "program doesn't init R0 before exit",
  203. .insns = {
  204. BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
  205. BPF_EXIT_INSN(),
  206. },
  207. .errstr = "R0 !read_ok",
  208. .result = REJECT,
  209. },
  210. {
  211. "program doesn't init R0 before exit in all branches",
  212. .insns = {
  213. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  214. BPF_MOV64_IMM(BPF_REG_0, 1),
  215. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
  216. BPF_EXIT_INSN(),
  217. },
  218. .errstr = "R0 !read_ok",
  219. .result = REJECT,
  220. },
  221. {
  222. "stack out of bounds",
  223. .insns = {
  224. BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
  225. BPF_EXIT_INSN(),
  226. },
  227. .errstr = "invalid stack",
  228. .result = REJECT,
  229. },
  230. {
  231. "invalid call insn1",
  232. .insns = {
  233. BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
  234. BPF_EXIT_INSN(),
  235. },
  236. .errstr = "BPF_CALL uses reserved",
  237. .result = REJECT,
  238. },
  239. {
  240. "invalid call insn2",
  241. .insns = {
  242. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
  243. BPF_EXIT_INSN(),
  244. },
  245. .errstr = "BPF_CALL uses reserved",
  246. .result = REJECT,
  247. },
  248. {
  249. "invalid function call",
  250. .insns = {
  251. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
  252. BPF_EXIT_INSN(),
  253. },
  254. .errstr = "invalid func 1234567",
  255. .result = REJECT,
  256. },
  257. {
  258. "uninitialized stack1",
  259. .insns = {
  260. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  261. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  262. BPF_LD_MAP_FD(BPF_REG_1, 0),
  263. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  264. BPF_EXIT_INSN(),
  265. },
  266. .fixup = {2},
  267. .errstr = "invalid indirect read from stack",
  268. .result = REJECT,
  269. },
  270. {
  271. "uninitialized stack2",
  272. .insns = {
  273. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  274. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
  275. BPF_EXIT_INSN(),
  276. },
  277. .errstr = "invalid read from stack",
  278. .result = REJECT,
  279. },
  280. {
  281. "check valid spill/fill",
  282. .insns = {
  283. /* spill R1(ctx) into stack */
  284. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  285. /* fill it back into R2 */
  286. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
  287. /* should be able to access R0 = *(R2 + 8) */
  288. /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
  289. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  290. BPF_EXIT_INSN(),
  291. },
  292. .result = ACCEPT,
  293. },
  294. {
  295. "check corrupted spill/fill",
  296. .insns = {
  297. /* spill R1(ctx) into stack */
  298. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  299. /* mess up with R1 pointer on stack */
  300. BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
  301. /* fill back into R0 should fail */
  302. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  303. BPF_EXIT_INSN(),
  304. },
  305. .errstr = "corrupted spill",
  306. .result = REJECT,
  307. },
  308. {
  309. "invalid src register in STX",
  310. .insns = {
  311. BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
  312. BPF_EXIT_INSN(),
  313. },
  314. .errstr = "R15 is invalid",
  315. .result = REJECT,
  316. },
  317. {
  318. "invalid dst register in STX",
  319. .insns = {
  320. BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
  321. BPF_EXIT_INSN(),
  322. },
  323. .errstr = "R14 is invalid",
  324. .result = REJECT,
  325. },
  326. {
  327. "invalid dst register in ST",
  328. .insns = {
  329. BPF_ST_MEM(BPF_B, 14, -1, -1),
  330. BPF_EXIT_INSN(),
  331. },
  332. .errstr = "R14 is invalid",
  333. .result = REJECT,
  334. },
  335. {
  336. "invalid src register in LDX",
  337. .insns = {
  338. BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
  339. BPF_EXIT_INSN(),
  340. },
  341. .errstr = "R12 is invalid",
  342. .result = REJECT,
  343. },
  344. {
  345. "invalid dst register in LDX",
  346. .insns = {
  347. BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
  348. BPF_EXIT_INSN(),
  349. },
  350. .errstr = "R11 is invalid",
  351. .result = REJECT,
  352. },
  353. {
  354. "junk insn",
  355. .insns = {
  356. BPF_RAW_INSN(0, 0, 0, 0, 0),
  357. BPF_EXIT_INSN(),
  358. },
  359. .errstr = "invalid BPF_LD_IMM",
  360. .result = REJECT,
  361. },
  362. {
  363. "junk insn2",
  364. .insns = {
  365. BPF_RAW_INSN(1, 0, 0, 0, 0),
  366. BPF_EXIT_INSN(),
  367. },
  368. .errstr = "BPF_LDX uses reserved fields",
  369. .result = REJECT,
  370. },
  371. {
  372. "junk insn3",
  373. .insns = {
  374. BPF_RAW_INSN(-1, 0, 0, 0, 0),
  375. BPF_EXIT_INSN(),
  376. },
  377. .errstr = "invalid BPF_ALU opcode f0",
  378. .result = REJECT,
  379. },
  380. {
  381. "junk insn4",
  382. .insns = {
  383. BPF_RAW_INSN(-1, -1, -1, -1, -1),
  384. BPF_EXIT_INSN(),
  385. },
  386. .errstr = "invalid BPF_ALU opcode f0",
  387. .result = REJECT,
  388. },
  389. {
  390. "junk insn5",
  391. .insns = {
  392. BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
  393. BPF_EXIT_INSN(),
  394. },
  395. .errstr = "BPF_ALU uses reserved fields",
  396. .result = REJECT,
  397. },
  398. {
  399. "misaligned read from stack",
  400. .insns = {
  401. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  402. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
  403. BPF_EXIT_INSN(),
  404. },
  405. .errstr = "misaligned access",
  406. .result = REJECT,
  407. },
  408. {
  409. "invalid map_fd for function call",
  410. .insns = {
  411. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  412. BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
  413. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  414. BPF_LD_MAP_FD(BPF_REG_1, 0),
  415. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
  416. BPF_EXIT_INSN(),
  417. },
  418. .errstr = "fd 0 is not pointing to valid bpf_map",
  419. .result = REJECT,
  420. },
  421. {
  422. "don't check return value before access",
  423. .insns = {
  424. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  425. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  426. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  427. BPF_LD_MAP_FD(BPF_REG_1, 0),
  428. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  429. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
  430. BPF_EXIT_INSN(),
  431. },
  432. .fixup = {3},
  433. .errstr = "R0 invalid mem access 'map_value_or_null'",
  434. .result = REJECT,
  435. },
  436. {
  437. "access memory with incorrect alignment",
  438. .insns = {
  439. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  440. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  441. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  442. BPF_LD_MAP_FD(BPF_REG_1, 0),
  443. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  444. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  445. BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
  446. BPF_EXIT_INSN(),
  447. },
  448. .fixup = {3},
  449. .errstr = "misaligned access",
  450. .result = REJECT,
  451. },
  452. {
  453. "sometimes access memory with incorrect alignment",
  454. .insns = {
  455. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  456. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  457. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  458. BPF_LD_MAP_FD(BPF_REG_1, 0),
  459. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  460. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  461. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
  462. BPF_EXIT_INSN(),
  463. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
  464. BPF_EXIT_INSN(),
  465. },
  466. .fixup = {3},
  467. .errstr = "R0 invalid mem access",
  468. .result = REJECT,
  469. },
  470. {
  471. "jump test 1",
  472. .insns = {
  473. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  474. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
  475. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  476. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  477. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
  478. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
  479. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
  480. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
  481. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
  482. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
  483. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
  484. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
  485. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  486. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
  487. BPF_MOV64_IMM(BPF_REG_0, 0),
  488. BPF_EXIT_INSN(),
  489. },
  490. .result = ACCEPT,
  491. },
  492. {
  493. "jump test 2",
  494. .insns = {
  495. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  496. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
  497. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  498. BPF_JMP_IMM(BPF_JA, 0, 0, 14),
  499. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
  500. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  501. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  502. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
  503. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  504. BPF_JMP_IMM(BPF_JA, 0, 0, 8),
  505. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
  506. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  507. BPF_JMP_IMM(BPF_JA, 0, 0, 5),
  508. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
  509. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  510. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  511. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  512. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  513. BPF_MOV64_IMM(BPF_REG_0, 0),
  514. BPF_EXIT_INSN(),
  515. },
  516. .result = ACCEPT,
  517. },
  518. {
  519. "jump test 3",
  520. .insns = {
  521. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  522. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  523. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  524. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  525. BPF_JMP_IMM(BPF_JA, 0, 0, 19),
  526. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
  527. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  528. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
  529. BPF_JMP_IMM(BPF_JA, 0, 0, 15),
  530. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
  531. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  532. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
  533. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  534. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
  535. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  536. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
  537. BPF_JMP_IMM(BPF_JA, 0, 0, 7),
  538. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
  539. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  540. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
  541. BPF_JMP_IMM(BPF_JA, 0, 0, 3),
  542. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
  543. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  544. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
  545. BPF_LD_MAP_FD(BPF_REG_1, 0),
  546. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
  547. BPF_EXIT_INSN(),
  548. },
  549. .fixup = {24},
  550. .result = ACCEPT,
  551. },
  552. {
  553. "jump test 4",
  554. .insns = {
  555. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  556. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  557. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  558. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  559. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  560. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  561. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  562. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  563. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  564. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  565. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  566. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  567. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  568. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  569. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  570. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  571. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  572. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  573. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  574. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  575. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  576. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  577. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  578. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  579. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  580. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  581. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  582. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  583. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  584. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  585. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  586. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  587. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  588. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  589. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  590. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  591. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  592. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  593. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  594. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  595. BPF_MOV64_IMM(BPF_REG_0, 0),
  596. BPF_EXIT_INSN(),
  597. },
  598. .result = ACCEPT,
  599. },
  600. {
  601. "jump test 5",
  602. .insns = {
  603. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  604. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  605. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  606. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  607. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  608. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  609. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  610. BPF_MOV64_IMM(BPF_REG_0, 0),
  611. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  612. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  613. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  614. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  615. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  616. BPF_MOV64_IMM(BPF_REG_0, 0),
  617. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  618. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  619. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  620. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  621. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  622. BPF_MOV64_IMM(BPF_REG_0, 0),
  623. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  624. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  625. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  626. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  627. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  628. BPF_MOV64_IMM(BPF_REG_0, 0),
  629. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  630. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  631. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  632. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  633. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  634. BPF_MOV64_IMM(BPF_REG_0, 0),
  635. BPF_EXIT_INSN(),
  636. },
  637. .result = ACCEPT,
  638. },
  639. {
  640. "access skb fields ok",
  641. .insns = {
  642. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  643. offsetof(struct __sk_buff, len)),
  644. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  645. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  646. offsetof(struct __sk_buff, mark)),
  647. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  648. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  649. offsetof(struct __sk_buff, pkt_type)),
  650. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  651. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  652. offsetof(struct __sk_buff, queue_mapping)),
  653. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  654. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  655. offsetof(struct __sk_buff, protocol)),
  656. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  657. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  658. offsetof(struct __sk_buff, vlan_present)),
  659. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  660. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  661. offsetof(struct __sk_buff, vlan_tci)),
  662. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  663. BPF_EXIT_INSN(),
  664. },
  665. .result = ACCEPT,
  666. },
  667. {
  668. "access skb fields bad1",
  669. .insns = {
  670. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
  671. BPF_EXIT_INSN(),
  672. },
  673. .errstr = "invalid bpf_context access",
  674. .result = REJECT,
  675. },
  676. {
  677. "access skb fields bad2",
  678. .insns = {
  679. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
  680. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  681. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  682. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  683. BPF_LD_MAP_FD(BPF_REG_1, 0),
  684. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  685. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  686. BPF_EXIT_INSN(),
  687. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  688. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  689. offsetof(struct __sk_buff, pkt_type)),
  690. BPF_EXIT_INSN(),
  691. },
  692. .fixup = {4},
  693. .errstr = "different pointers",
  694. .result = REJECT,
  695. },
  696. {
  697. "access skb fields bad3",
  698. .insns = {
  699. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  700. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  701. offsetof(struct __sk_buff, pkt_type)),
  702. BPF_EXIT_INSN(),
  703. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  704. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  705. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  706. BPF_LD_MAP_FD(BPF_REG_1, 0),
  707. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  708. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  709. BPF_EXIT_INSN(),
  710. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  711. BPF_JMP_IMM(BPF_JA, 0, 0, -12),
  712. },
  713. .fixup = {6},
  714. .errstr = "different pointers",
  715. .result = REJECT,
  716. },
  717. {
  718. "access skb fields bad4",
  719. .insns = {
  720. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
  721. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  722. offsetof(struct __sk_buff, len)),
  723. BPF_MOV64_IMM(BPF_REG_0, 0),
  724. BPF_EXIT_INSN(),
  725. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  726. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  727. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  728. BPF_LD_MAP_FD(BPF_REG_1, 0),
  729. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  730. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  731. BPF_EXIT_INSN(),
  732. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  733. BPF_JMP_IMM(BPF_JA, 0, 0, -13),
  734. },
  735. .fixup = {7},
  736. .errstr = "different pointers",
  737. .result = REJECT,
  738. },
  739. {
  740. "check skb->mark is not writeable by sockets",
  741. .insns = {
  742. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  743. offsetof(struct __sk_buff, mark)),
  744. BPF_EXIT_INSN(),
  745. },
  746. .errstr = "invalid bpf_context access",
  747. .result = REJECT,
  748. },
  749. {
  750. "check skb->tc_index is not writeable by sockets",
  751. .insns = {
  752. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  753. offsetof(struct __sk_buff, tc_index)),
  754. BPF_EXIT_INSN(),
  755. },
  756. .errstr = "invalid bpf_context access",
  757. .result = REJECT,
  758. },
  759. {
  760. "check non-u32 access to cb",
  761. .insns = {
  762. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
  763. offsetof(struct __sk_buff, cb[0])),
  764. BPF_EXIT_INSN(),
  765. },
  766. .errstr = "invalid bpf_context access",
  767. .result = REJECT,
  768. },
  769. {
  770. "check out of range skb->cb access",
  771. .insns = {
  772. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  773. offsetof(struct __sk_buff, cb[60])),
  774. BPF_EXIT_INSN(),
  775. },
  776. .errstr = "invalid bpf_context access",
  777. .result = REJECT,
  778. .prog_type = BPF_PROG_TYPE_SCHED_ACT,
  779. },
  780. {
  781. "write skb fields from socket prog",
  782. .insns = {
  783. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  784. offsetof(struct __sk_buff, cb[4])),
  785. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  786. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  787. offsetof(struct __sk_buff, mark)),
  788. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  789. offsetof(struct __sk_buff, tc_index)),
  790. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  791. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  792. offsetof(struct __sk_buff, cb[0])),
  793. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  794. offsetof(struct __sk_buff, cb[2])),
  795. BPF_EXIT_INSN(),
  796. },
  797. .result = ACCEPT,
  798. },
  799. {
  800. "write skb fields from tc_cls_act prog",
  801. .insns = {
  802. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  803. offsetof(struct __sk_buff, cb[0])),
  804. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  805. offsetof(struct __sk_buff, mark)),
  806. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  807. offsetof(struct __sk_buff, tc_index)),
  808. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  809. offsetof(struct __sk_buff, tc_index)),
  810. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  811. offsetof(struct __sk_buff, cb[3])),
  812. BPF_EXIT_INSN(),
  813. },
  814. .result = ACCEPT,
  815. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  816. },
  817. {
  818. "PTR_TO_STACK store/load",
  819. .insns = {
  820. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  821. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
  822. BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
  823. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
  824. BPF_EXIT_INSN(),
  825. },
  826. .result = ACCEPT,
  827. },
  828. {
  829. "PTR_TO_STACK store/load - bad alignment on off",
  830. .insns = {
  831. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  832. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  833. BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
  834. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
  835. BPF_EXIT_INSN(),
  836. },
  837. .result = REJECT,
  838. .errstr = "misaligned access off -6 size 8",
  839. },
  840. {
  841. "PTR_TO_STACK store/load - bad alignment on reg",
  842. .insns = {
  843. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  844. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
  845. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  846. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  847. BPF_EXIT_INSN(),
  848. },
  849. .result = REJECT,
  850. .errstr = "misaligned access off -2 size 8",
  851. },
  852. {
  853. "PTR_TO_STACK store/load - out of bounds low",
  854. .insns = {
  855. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  856. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
  857. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  858. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  859. BPF_EXIT_INSN(),
  860. },
  861. .result = REJECT,
  862. .errstr = "invalid stack off=-79992 size=8",
  863. },
  864. {
  865. "PTR_TO_STACK store/load - out of bounds high",
  866. .insns = {
  867. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  868. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  869. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  870. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  871. BPF_EXIT_INSN(),
  872. },
  873. .result = REJECT,
  874. .errstr = "invalid stack off=0 size=8",
  875. },
  876. };
  877. static int probe_filter_length(struct bpf_insn *fp)
  878. {
  879. int len = 0;
  880. for (len = MAX_INSNS - 1; len > 0; --len)
  881. if (fp[len].code != 0 || fp[len].imm != 0)
  882. break;
  883. return len + 1;
  884. }
  885. static int create_map(void)
  886. {
  887. long long key, value = 0;
  888. int map_fd;
  889. map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 1024);
  890. if (map_fd < 0) {
  891. printf("failed to create map '%s'\n", strerror(errno));
  892. }
  893. return map_fd;
  894. }
  895. static int test(void)
  896. {
  897. int prog_fd, i, pass_cnt = 0, err_cnt = 0;
  898. for (i = 0; i < ARRAY_SIZE(tests); i++) {
  899. struct bpf_insn *prog = tests[i].insns;
  900. int prog_type = tests[i].prog_type;
  901. int prog_len = probe_filter_length(prog);
  902. int *fixup = tests[i].fixup;
  903. int map_fd = -1;
  904. if (*fixup) {
  905. map_fd = create_map();
  906. do {
  907. prog[*fixup].imm = map_fd;
  908. fixup++;
  909. } while (*fixup);
  910. }
  911. printf("#%d %s ", i, tests[i].descr);
  912. prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
  913. prog, prog_len * sizeof(struct bpf_insn),
  914. "GPL", 0);
  915. if (tests[i].result == ACCEPT) {
  916. if (prog_fd < 0) {
  917. printf("FAIL\nfailed to load prog '%s'\n",
  918. strerror(errno));
  919. printf("%s", bpf_log_buf);
  920. err_cnt++;
  921. goto fail;
  922. }
  923. } else {
  924. if (prog_fd >= 0) {
  925. printf("FAIL\nunexpected success to load\n");
  926. printf("%s", bpf_log_buf);
  927. err_cnt++;
  928. goto fail;
  929. }
  930. if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
  931. printf("FAIL\nunexpected error message: %s",
  932. bpf_log_buf);
  933. err_cnt++;
  934. goto fail;
  935. }
  936. }
  937. pass_cnt++;
  938. printf("OK\n");
  939. fail:
  940. if (map_fd >= 0)
  941. close(map_fd);
  942. close(prog_fd);
  943. }
  944. printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
  945. return 0;
  946. }
  947. int main(void)
  948. {
  949. return test();
  950. }