/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <errno.h>
#include <linux/unistd.h>
#include <string.h>
#include <linux/filter.h>
#include <stddef.h>
#include <stdbool.h>
#include <sys/resource.h>
#include "libbpf.h"

#define MAX_INSNS 512
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
#define MAX_FIXUPS 8
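
/*
 * A test is a single eBPF program plus the verdict the verifier is expected
 * to return for it.  fixup[] and prog_array_fixup[] are zero-terminated
 * lists of instruction indices whose imm field is patched at runtime with a
 * freshly created map fd (or prog-array fd), so that BPF_LD_MAP_FD
 * placeholders end up referring to real maps.  errstr/result describe the
 * expectation for privileged users; the _unpriv variants, when set,
 * override them for unprivileged runs (UNDEF means "same as privileged").
 */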
struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup[MAX_FIXUPS];
	int prog_array_fixup[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
};
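
/*
 * The programs below are written directly in BPF instruction macros.
 * Unused trailing slots of insns[] stay zero-filled; probe_filter_length()
 * relies on that padding to recover each program's real length.
 */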
static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit in all branches",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func 1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup = {2},
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
			/* should be able to access R0 = *(R2 + 8) */
			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R0 leaks addr",
		.result = ACCEPT,
		.result_unpriv = REJECT,
	},
	{
		"check valid spill/fill, skb mark",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = ACCEPT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.errstr = "corrupted spill",
		.result = REJECT,
	},
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.result = REJECT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup = {24},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup = {4},
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
		},
		.fixup = {6},
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
		},
		.fixup = {7},
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"check skb->mark is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check skb->tc_index is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check non-u32 access to cb",
		.insns = {
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check out of range skb->cb access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 256),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
	},
	{
		"write skb fields from socket prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.errstr_unpriv = "R1 leaks addr",
		.result_unpriv = REJECT,
	},
	{
		"write skb fields from tc_cls_act prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"PTR_TO_STACK store/load",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"PTR_TO_STACK store/load - bad alignment on off",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -6 size 8",
	},
	{
		"PTR_TO_STACK store/load - bad alignment on reg",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -2 size 8",
	},
	{
		"PTR_TO_STACK store/load - out of bounds low",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=-79992 size=8",
	},
	{
		"PTR_TO_STACK store/load - out of bounds high",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=0 size=8",
	},
  924. "unpriv: return pointer",
  925. .insns = {
  926. BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
  927. BPF_EXIT_INSN(),
  928. },
  929. .result = ACCEPT,
  930. .result_unpriv = REJECT,
  931. .errstr_unpriv = "R0 leaks addr",
  932. },
  933. {
  934. "unpriv: add const to pointer",
  935. .insns = {
  936. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  937. BPF_MOV64_IMM(BPF_REG_0, 0),
  938. BPF_EXIT_INSN(),
  939. },
  940. .result = ACCEPT,
  941. .result_unpriv = REJECT,
  942. .errstr_unpriv = "R1 pointer arithmetic",
  943. },
  944. {
  945. "unpriv: add pointer to pointer",
  946. .insns = {
  947. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
  948. BPF_MOV64_IMM(BPF_REG_0, 0),
  949. BPF_EXIT_INSN(),
  950. },
  951. .result = ACCEPT,
  952. .result_unpriv = REJECT,
  953. .errstr_unpriv = "R1 pointer arithmetic",
  954. },
  955. {
  956. "unpriv: neg pointer",
  957. .insns = {
  958. BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
  959. BPF_MOV64_IMM(BPF_REG_0, 0),
  960. BPF_EXIT_INSN(),
  961. },
  962. .result = ACCEPT,
  963. .result_unpriv = REJECT,
  964. .errstr_unpriv = "R1 pointer arithmetic",
  965. },
  966. {
  967. "unpriv: cmp pointer with const",
  968. .insns = {
  969. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
  970. BPF_MOV64_IMM(BPF_REG_0, 0),
  971. BPF_EXIT_INSN(),
  972. },
  973. .result = ACCEPT,
  974. .result_unpriv = REJECT,
  975. .errstr_unpriv = "R1 pointer comparison",
  976. },
  977. {
  978. "unpriv: cmp pointer with pointer",
  979. .insns = {
  980. BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  981. BPF_MOV64_IMM(BPF_REG_0, 0),
  982. BPF_EXIT_INSN(),
  983. },
  984. .result = ACCEPT,
  985. .result_unpriv = REJECT,
  986. .errstr_unpriv = "R10 pointer comparison",
  987. },
  988. {
  989. "unpriv: check that printk is disallowed",
  990. .insns = {
  991. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  992. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  993. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  994. BPF_MOV64_IMM(BPF_REG_2, 8),
  995. BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
  996. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
  997. BPF_MOV64_IMM(BPF_REG_0, 0),
  998. BPF_EXIT_INSN(),
  999. },
  1000. .errstr_unpriv = "unknown func 6",
  1001. .result_unpriv = REJECT,
  1002. .result = ACCEPT,
  1003. },
  1004. {
  1005. "unpriv: pass pointer to helper function",
  1006. .insns = {
  1007. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1008. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1009. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1010. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1011. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  1012. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1013. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
  1014. BPF_MOV64_IMM(BPF_REG_0, 0),
  1015. BPF_EXIT_INSN(),
  1016. },
  1017. .fixup = {3},
  1018. .errstr_unpriv = "R4 leaks addr",
  1019. .result_unpriv = REJECT,
  1020. .result = ACCEPT,
  1021. },
  1022. {
  1023. "unpriv: indirectly pass pointer on stack to helper function",
  1024. .insns = {
  1025. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1026. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1027. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1028. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1029. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1030. BPF_MOV64_IMM(BPF_REG_0, 0),
  1031. BPF_EXIT_INSN(),
  1032. },
  1033. .fixup = {3},
  1034. .errstr = "invalid indirect read from stack off -8+0 size 8",
  1035. .result = REJECT,
  1036. },
  1037. {
  1038. "unpriv: mangle pointer on stack 1",
  1039. .insns = {
  1040. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1041. BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
  1042. BPF_MOV64_IMM(BPF_REG_0, 0),
  1043. BPF_EXIT_INSN(),
  1044. },
  1045. .errstr_unpriv = "attempt to corrupt spilled",
  1046. .result_unpriv = REJECT,
  1047. .result = ACCEPT,
  1048. },
  1049. {
  1050. "unpriv: mangle pointer on stack 2",
  1051. .insns = {
  1052. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1053. BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
  1054. BPF_MOV64_IMM(BPF_REG_0, 0),
  1055. BPF_EXIT_INSN(),
  1056. },
  1057. .errstr_unpriv = "attempt to corrupt spilled",
  1058. .result_unpriv = REJECT,
  1059. .result = ACCEPT,
  1060. },
  1061. {
  1062. "unpriv: read pointer from stack in small chunks",
  1063. .insns = {
  1064. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1065. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
  1066. BPF_MOV64_IMM(BPF_REG_0, 0),
  1067. BPF_EXIT_INSN(),
  1068. },
  1069. .errstr = "invalid size",
  1070. .result = REJECT,
  1071. },
  1072. {
  1073. "unpriv: write pointer into ctx",
  1074. .insns = {
  1075. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
  1076. BPF_MOV64_IMM(BPF_REG_0, 0),
  1077. BPF_EXIT_INSN(),
  1078. },
  1079. .errstr_unpriv = "R1 leaks addr",
  1080. .result_unpriv = REJECT,
  1081. .errstr = "invalid bpf_context access",
  1082. .result = REJECT,
  1083. },
  1084. {
  1085. "unpriv: write pointer into map elem value",
  1086. .insns = {
  1087. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1088. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1089. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1090. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1091. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  1092. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  1093. BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
  1094. BPF_EXIT_INSN(),
  1095. },
  1096. .fixup = {3},
  1097. .errstr_unpriv = "R0 leaks addr",
  1098. .result_unpriv = REJECT,
  1099. .result = ACCEPT,
  1100. },
  1101. {
  1102. "unpriv: partial copy of pointer",
  1103. .insns = {
  1104. BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
  1105. BPF_MOV64_IMM(BPF_REG_0, 0),
  1106. BPF_EXIT_INSN(),
  1107. },
  1108. .errstr_unpriv = "R10 partial copy",
  1109. .result_unpriv = REJECT,
  1110. .result = ACCEPT,
  1111. },
  1112. {
  1113. "unpriv: pass pointer to tail_call",
  1114. .insns = {
  1115. BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
  1116. BPF_LD_MAP_FD(BPF_REG_2, 0),
  1117. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
  1118. BPF_MOV64_IMM(BPF_REG_0, 0),
  1119. BPF_EXIT_INSN(),
  1120. },
  1121. .prog_array_fixup = {1},
  1122. .errstr_unpriv = "R3 leaks addr into helper",
  1123. .result_unpriv = REJECT,
  1124. .result = ACCEPT,
  1125. },
  1126. {
  1127. "unpriv: cmp map pointer with zero",
  1128. .insns = {
  1129. BPF_MOV64_IMM(BPF_REG_1, 0),
  1130. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1131. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
  1132. BPF_MOV64_IMM(BPF_REG_0, 0),
  1133. BPF_EXIT_INSN(),
  1134. },
  1135. .fixup = {1},
  1136. .errstr_unpriv = "R1 pointer comparison",
  1137. .result_unpriv = REJECT,
  1138. .result = ACCEPT,
  1139. },
  1140. {
  1141. "unpriv: write into frame pointer",
  1142. .insns = {
  1143. BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
  1144. BPF_MOV64_IMM(BPF_REG_0, 0),
  1145. BPF_EXIT_INSN(),
  1146. },
  1147. .errstr = "frame pointer is read only",
  1148. .result = REJECT,
  1149. },
  1150. {
  1151. "unpriv: cmp of frame pointer",
  1152. .insns = {
  1153. BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
  1154. BPF_MOV64_IMM(BPF_REG_0, 0),
  1155. BPF_EXIT_INSN(),
  1156. },
  1157. .errstr_unpriv = "R10 pointer comparison",
  1158. .result_unpriv = REJECT,
  1159. .result = ACCEPT,
  1160. },
  1161. {
  1162. "unpriv: cmp of stack pointer",
  1163. .insns = {
  1164. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1165. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1166. BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
  1167. BPF_MOV64_IMM(BPF_REG_0, 0),
  1168. BPF_EXIT_INSN(),
  1169. },
  1170. .errstr_unpriv = "R2 pointer comparison",
  1171. .result_unpriv = REJECT,
  1172. .result = ACCEPT,
  1173. },
  1174. {
  1175. "unpriv: obfuscate stack pointer",
  1176. .insns = {
  1177. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1178. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1179. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1180. BPF_MOV64_IMM(BPF_REG_0, 0),
  1181. BPF_EXIT_INSN(),
  1182. },
  1183. .errstr_unpriv = "R2 pointer arithmetic",
  1184. .result_unpriv = REJECT,
  1185. .result = ACCEPT,
  1186. },
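	/*
	 * The "raw_stack:" tests feed stack buffers to bpf_skb_load_bytes(),
	 * which is allowed to write into uninitialized stack memory; note how
	 * pointers spilled inside the written window get clobbered to 'inv'.
	 */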
	{
		"raw_stack: no skb_load_bytes",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			/* Call to skb_load_bytes() omitted. */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid read from stack off -8+0 size 8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, no init",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, init",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, spilled regs around bounds",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, spilled regs corruption",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), /* fill ctx into R0 */
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R0 invalid mem access 'inv'",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, spilled regs corruption 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), /* fill ctx into R3 */
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R3 invalid mem access 'inv'",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, spilled regs + data",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), /* spill ctx from R1 */
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), /* fill ctx into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), /* fill data into R3 */
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-513 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=-1",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 5",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 6",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=0",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, large access",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 512),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
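	/*
	 * The "pkt:" tests cover direct packet access through skb->data and
	 * skb->data_end: every access must be preceded by an explicit bounds
	 * check, only cls/act program types may touch these fields, and the
	 * packet is not writable this way.
	 */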
	{
		"pkt: test1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"pkt: test2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"pkt: test3",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access off=76",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	},
	{
		"pkt: test4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "cannot write",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
};
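
/*
 * Programs in the table are padded with zeroed instructions up to
 * MAX_INSNS; scan backwards for the last non-zero instruction to recover
 * the real program length.
 */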
static int probe_filter_length(struct bpf_insn *fp)
{
	int len = 0;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}
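
/* Hash map used as the patch target for .fixup entries (u64 key/value). */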
static int create_map(void)
{
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
				sizeof(long long), sizeof(long long), 1024, 0);
	if (map_fd < 0)
		printf("failed to create map '%s'\n", strerror(errno));

	return map_fd;
}
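
/* Prog array used as the patch target for .prog_array_fixup entries. */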
static int create_prog_array(void)
{
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY,
				sizeof(int), sizeof(int), 4, 0);
	if (map_fd < 0)
		printf("failed to create prog_array '%s'\n", strerror(errno));

	return map_fd;
}
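
/*
 * Patch in map fds, try to load each program and compare the outcome (and,
 * on expected rejection, the verifier log) against the test's expectation.
 * When running as non-root the *_unpriv expectations take precedence.
 */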
static int test(void)
{
	int prog_fd, i, pass_cnt = 0, err_cnt = 0;
	bool unpriv = geteuid() != 0;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_insn *prog = tests[i].insns;
		int prog_type = tests[i].prog_type;
		int prog_len = probe_filter_length(prog);
		int *fixup = tests[i].fixup;
		int *prog_array_fixup = tests[i].prog_array_fixup;
		int expected_result;
		const char *expected_errstr;
		int map_fd = -1, prog_array_fd = -1;

		if (*fixup) {
			map_fd = create_map();

			do {
				prog[*fixup].imm = map_fd;
				fixup++;
			} while (*fixup);
		}
		if (*prog_array_fixup) {
			prog_array_fd = create_prog_array();

			do {
				prog[*prog_array_fixup].imm = prog_array_fd;
				prog_array_fixup++;
			} while (*prog_array_fixup);
		}
		printf("#%d %s ", i, tests[i].descr);

		prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
					prog, prog_len * sizeof(struct bpf_insn),
					"GPL", 0);

		if (unpriv && tests[i].result_unpriv != UNDEF)
			expected_result = tests[i].result_unpriv;
		else
			expected_result = tests[i].result;

		if (unpriv && tests[i].errstr_unpriv)
			expected_errstr = tests[i].errstr_unpriv;
		else
			expected_errstr = tests[i].errstr;

		if (expected_result == ACCEPT) {
			if (prog_fd < 0) {
				printf("FAIL\nfailed to load prog '%s'\n",
				       strerror(errno));
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		} else {
			if (prog_fd >= 0) {
				printf("FAIL\nunexpected success to load\n");
				printf("%s", bpf_log_buf);
				err_cnt++;
				goto fail;
			}
			if (strstr(bpf_log_buf, expected_errstr) == 0) {
				printf("FAIL\nunexpected error message: %s",
				       bpf_log_buf);
				err_cnt++;
				goto fail;
			}
		}

		pass_cnt++;
		printf("OK\n");
fail:
		if (map_fd >= 0)
			close(map_fd);
		if (prog_array_fd >= 0)
			close(prog_array_fd);
		close(prog_fd);
	}
	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
	return 0;
}
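
/*
 * BPF programs and maps are charged against RLIMIT_MEMLOCK, so raise the
 * limit up front to keep the many loads below from failing spuriously.
 */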
int main(void)
{
	struct rlimit r = {1 << 20, 1 << 20};

	setrlimit(RLIMIT_MEMLOCK, &r);
	return test();
}