/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
  10. #include <stdio.h>
  11. #include <unistd.h>
  12. #include <errno.h>
  13. #include <string.h>
  14. #include <stddef.h>
  15. #include <stdbool.h>
  16. #include <sched.h>
  17. #include <sys/resource.h>
  18. #include <linux/unistd.h>
  19. #include <linux/filter.h>
  20. #include <linux/bpf_perf_event.h>
  21. #include <linux/bpf.h>
  22. #include "../../../include/linux/filter.h"
  23. #include "bpf_sys.h"
#ifndef ARRAY_SIZE
/* Element count of a statically-sized array (do not use on pointers). */
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

/* Capacity of bpf_test::insns — max instructions per test program. */
#define MAX_INSNS	512
/* Capacity of each bpf_test fixup array — max patch slots per test. */
#define MAX_FIXUPS	8
/* One verifier test case: a hand-assembled BPF program together with the
 * expected verifier outcome, for both privileged and unprivileged loads.
 */
struct bpf_test {
	const char *descr;			/* human-readable test name */
	struct bpf_insn insns[MAX_INSNS];	/* program under test */
	/* Instruction indices whose imm field is patched with a map fd
	 * before loading (entries below use the BPF_LD_MAP_FD slot index).
	 * map1 vs map2 presumably differ in the map's value layout
	 * (see struct test_val) — confirm against the runner, which is
	 * outside this chunk.
	 */
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];		/* same, for a prog-array fd */
	const char *errstr;			/* substring expected in verifier log */
	const char *errstr_unpriv;		/* expected log substring when loaded unprivileged */
	enum {
		UNDEF,		/* zero value: field left unset by the initializer */
		ACCEPT,
		REJECT
	} result, result_unpriv;
	/* Program type to load as; zero (unset) when a test omits it. */
	enum bpf_prog_type prog_type;
};
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

/* Map value layout used by tests that read/write map element contents. */
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};
  52. static struct bpf_test tests[] = {
  53. {
  54. "add+sub+mul",
  55. .insns = {
  56. BPF_MOV64_IMM(BPF_REG_1, 1),
  57. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
  58. BPF_MOV64_IMM(BPF_REG_2, 3),
  59. BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
  60. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
  61. BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
  62. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  63. BPF_EXIT_INSN(),
  64. },
  65. .result = ACCEPT,
  66. },
  67. {
  68. "unreachable",
  69. .insns = {
  70. BPF_EXIT_INSN(),
  71. BPF_EXIT_INSN(),
  72. },
  73. .errstr = "unreachable",
  74. .result = REJECT,
  75. },
  76. {
  77. "unreachable2",
  78. .insns = {
  79. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  80. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  81. BPF_EXIT_INSN(),
  82. },
  83. .errstr = "unreachable",
  84. .result = REJECT,
  85. },
  86. {
  87. "out of range jump",
  88. .insns = {
  89. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  90. BPF_EXIT_INSN(),
  91. },
  92. .errstr = "jump out of range",
  93. .result = REJECT,
  94. },
  95. {
  96. "out of range jump2",
  97. .insns = {
  98. BPF_JMP_IMM(BPF_JA, 0, 0, -2),
  99. BPF_EXIT_INSN(),
  100. },
  101. .errstr = "jump out of range",
  102. .result = REJECT,
  103. },
  104. {
  105. "test1 ld_imm64",
  106. .insns = {
  107. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  108. BPF_LD_IMM64(BPF_REG_0, 0),
  109. BPF_LD_IMM64(BPF_REG_0, 0),
  110. BPF_LD_IMM64(BPF_REG_0, 1),
  111. BPF_LD_IMM64(BPF_REG_0, 1),
  112. BPF_MOV64_IMM(BPF_REG_0, 2),
  113. BPF_EXIT_INSN(),
  114. },
  115. .errstr = "invalid BPF_LD_IMM insn",
  116. .errstr_unpriv = "R1 pointer comparison",
  117. .result = REJECT,
  118. },
  119. {
  120. "test2 ld_imm64",
  121. .insns = {
  122. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  123. BPF_LD_IMM64(BPF_REG_0, 0),
  124. BPF_LD_IMM64(BPF_REG_0, 0),
  125. BPF_LD_IMM64(BPF_REG_0, 1),
  126. BPF_LD_IMM64(BPF_REG_0, 1),
  127. BPF_EXIT_INSN(),
  128. },
  129. .errstr = "invalid BPF_LD_IMM insn",
  130. .errstr_unpriv = "R1 pointer comparison",
  131. .result = REJECT,
  132. },
  133. {
  134. "test3 ld_imm64",
  135. .insns = {
  136. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  137. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  138. BPF_LD_IMM64(BPF_REG_0, 0),
  139. BPF_LD_IMM64(BPF_REG_0, 0),
  140. BPF_LD_IMM64(BPF_REG_0, 1),
  141. BPF_LD_IMM64(BPF_REG_0, 1),
  142. BPF_EXIT_INSN(),
  143. },
  144. .errstr = "invalid bpf_ld_imm64 insn",
  145. .result = REJECT,
  146. },
  147. {
  148. "test4 ld_imm64",
  149. .insns = {
  150. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  151. BPF_EXIT_INSN(),
  152. },
  153. .errstr = "invalid bpf_ld_imm64 insn",
  154. .result = REJECT,
  155. },
  156. {
  157. "test5 ld_imm64",
  158. .insns = {
  159. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  160. },
  161. .errstr = "invalid bpf_ld_imm64 insn",
  162. .result = REJECT,
  163. },
  164. {
  165. "no bpf_exit",
  166. .insns = {
  167. BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
  168. },
  169. .errstr = "jump out of range",
  170. .result = REJECT,
  171. },
  172. {
  173. "loop (back-edge)",
  174. .insns = {
  175. BPF_JMP_IMM(BPF_JA, 0, 0, -1),
  176. BPF_EXIT_INSN(),
  177. },
  178. .errstr = "back-edge",
  179. .result = REJECT,
  180. },
  181. {
  182. "loop2 (back-edge)",
  183. .insns = {
  184. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  185. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  186. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  187. BPF_JMP_IMM(BPF_JA, 0, 0, -4),
  188. BPF_EXIT_INSN(),
  189. },
  190. .errstr = "back-edge",
  191. .result = REJECT,
  192. },
  193. {
  194. "conditional loop",
  195. .insns = {
  196. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  197. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  198. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  199. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
  200. BPF_EXIT_INSN(),
  201. },
  202. .errstr = "back-edge",
  203. .result = REJECT,
  204. },
  205. {
  206. "read uninitialized register",
  207. .insns = {
  208. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  209. BPF_EXIT_INSN(),
  210. },
  211. .errstr = "R2 !read_ok",
  212. .result = REJECT,
  213. },
  214. {
  215. "read invalid register",
  216. .insns = {
  217. BPF_MOV64_REG(BPF_REG_0, -1),
  218. BPF_EXIT_INSN(),
  219. },
  220. .errstr = "R15 is invalid",
  221. .result = REJECT,
  222. },
  223. {
  224. "program doesn't init R0 before exit",
  225. .insns = {
  226. BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
  227. BPF_EXIT_INSN(),
  228. },
  229. .errstr = "R0 !read_ok",
  230. .result = REJECT,
  231. },
  232. {
  233. "program doesn't init R0 before exit in all branches",
  234. .insns = {
  235. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  236. BPF_MOV64_IMM(BPF_REG_0, 1),
  237. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
  238. BPF_EXIT_INSN(),
  239. },
  240. .errstr = "R0 !read_ok",
  241. .errstr_unpriv = "R1 pointer comparison",
  242. .result = REJECT,
  243. },
  244. {
  245. "stack out of bounds",
  246. .insns = {
  247. BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
  248. BPF_EXIT_INSN(),
  249. },
  250. .errstr = "invalid stack",
  251. .result = REJECT,
  252. },
  253. {
  254. "invalid call insn1",
  255. .insns = {
  256. BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
  257. BPF_EXIT_INSN(),
  258. },
  259. .errstr = "BPF_CALL uses reserved",
  260. .result = REJECT,
  261. },
  262. {
  263. "invalid call insn2",
  264. .insns = {
  265. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
  266. BPF_EXIT_INSN(),
  267. },
  268. .errstr = "BPF_CALL uses reserved",
  269. .result = REJECT,
  270. },
  271. {
  272. "invalid function call",
  273. .insns = {
  274. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
  275. BPF_EXIT_INSN(),
  276. },
  277. .errstr = "invalid func unknown#1234567",
  278. .result = REJECT,
  279. },
  280. {
  281. "uninitialized stack1",
  282. .insns = {
  283. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  284. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  285. BPF_LD_MAP_FD(BPF_REG_1, 0),
  286. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  287. BPF_FUNC_map_lookup_elem),
  288. BPF_EXIT_INSN(),
  289. },
  290. .fixup_map1 = { 2 },
  291. .errstr = "invalid indirect read from stack",
  292. .result = REJECT,
  293. },
  294. {
  295. "uninitialized stack2",
  296. .insns = {
  297. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  298. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
  299. BPF_EXIT_INSN(),
  300. },
  301. .errstr = "invalid read from stack",
  302. .result = REJECT,
  303. },
  304. {
  305. "invalid argument register",
  306. .insns = {
  307. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  308. BPF_FUNC_get_cgroup_classid),
  309. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  310. BPF_FUNC_get_cgroup_classid),
  311. BPF_EXIT_INSN(),
  312. },
  313. .errstr = "R1 !read_ok",
  314. .result = REJECT,
  315. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  316. },
  317. {
  318. "non-invalid argument register",
  319. .insns = {
  320. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  321. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  322. BPF_FUNC_get_cgroup_classid),
  323. BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
  324. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  325. BPF_FUNC_get_cgroup_classid),
  326. BPF_EXIT_INSN(),
  327. },
  328. .result = ACCEPT,
  329. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  330. },
  331. {
  332. "check valid spill/fill",
  333. .insns = {
  334. /* spill R1(ctx) into stack */
  335. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  336. /* fill it back into R2 */
  337. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
  338. /* should be able to access R0 = *(R2 + 8) */
  339. /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
  340. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  341. BPF_EXIT_INSN(),
  342. },
  343. .errstr_unpriv = "R0 leaks addr",
  344. .result = ACCEPT,
  345. .result_unpriv = REJECT,
  346. },
  347. {
  348. "check valid spill/fill, skb mark",
  349. .insns = {
  350. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  351. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
  352. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  353. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  354. offsetof(struct __sk_buff, mark)),
  355. BPF_EXIT_INSN(),
  356. },
  357. .result = ACCEPT,
  358. .result_unpriv = ACCEPT,
  359. },
  360. {
  361. "check corrupted spill/fill",
  362. .insns = {
  363. /* spill R1(ctx) into stack */
  364. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  365. /* mess up with R1 pointer on stack */
  366. BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
  367. /* fill back into R0 should fail */
  368. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  369. BPF_EXIT_INSN(),
  370. },
  371. .errstr_unpriv = "attempt to corrupt spilled",
  372. .errstr = "corrupted spill",
  373. .result = REJECT,
  374. },
  375. {
  376. "invalid src register in STX",
  377. .insns = {
  378. BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
  379. BPF_EXIT_INSN(),
  380. },
  381. .errstr = "R15 is invalid",
  382. .result = REJECT,
  383. },
  384. {
  385. "invalid dst register in STX",
  386. .insns = {
  387. BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
  388. BPF_EXIT_INSN(),
  389. },
  390. .errstr = "R14 is invalid",
  391. .result = REJECT,
  392. },
  393. {
  394. "invalid dst register in ST",
  395. .insns = {
  396. BPF_ST_MEM(BPF_B, 14, -1, -1),
  397. BPF_EXIT_INSN(),
  398. },
  399. .errstr = "R14 is invalid",
  400. .result = REJECT,
  401. },
  402. {
  403. "invalid src register in LDX",
  404. .insns = {
  405. BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
  406. BPF_EXIT_INSN(),
  407. },
  408. .errstr = "R12 is invalid",
  409. .result = REJECT,
  410. },
  411. {
  412. "invalid dst register in LDX",
  413. .insns = {
  414. BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
  415. BPF_EXIT_INSN(),
  416. },
  417. .errstr = "R11 is invalid",
  418. .result = REJECT,
  419. },
  420. {
  421. "junk insn",
  422. .insns = {
  423. BPF_RAW_INSN(0, 0, 0, 0, 0),
  424. BPF_EXIT_INSN(),
  425. },
  426. .errstr = "invalid BPF_LD_IMM",
  427. .result = REJECT,
  428. },
  429. {
  430. "junk insn2",
  431. .insns = {
  432. BPF_RAW_INSN(1, 0, 0, 0, 0),
  433. BPF_EXIT_INSN(),
  434. },
  435. .errstr = "BPF_LDX uses reserved fields",
  436. .result = REJECT,
  437. },
  438. {
  439. "junk insn3",
  440. .insns = {
  441. BPF_RAW_INSN(-1, 0, 0, 0, 0),
  442. BPF_EXIT_INSN(),
  443. },
  444. .errstr = "invalid BPF_ALU opcode f0",
  445. .result = REJECT,
  446. },
  447. {
  448. "junk insn4",
  449. .insns = {
  450. BPF_RAW_INSN(-1, -1, -1, -1, -1),
  451. BPF_EXIT_INSN(),
  452. },
  453. .errstr = "invalid BPF_ALU opcode f0",
  454. .result = REJECT,
  455. },
  456. {
  457. "junk insn5",
  458. .insns = {
  459. BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
  460. BPF_EXIT_INSN(),
  461. },
  462. .errstr = "BPF_ALU uses reserved fields",
  463. .result = REJECT,
  464. },
  465. {
  466. "misaligned read from stack",
  467. .insns = {
  468. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  469. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
  470. BPF_EXIT_INSN(),
  471. },
  472. .errstr = "misaligned access",
  473. .result = REJECT,
  474. },
  475. {
  476. "invalid map_fd for function call",
  477. .insns = {
  478. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  479. BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
  480. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  481. BPF_LD_MAP_FD(BPF_REG_1, 0),
  482. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  483. BPF_FUNC_map_delete_elem),
  484. BPF_EXIT_INSN(),
  485. },
  486. .errstr = "fd 0 is not pointing to valid bpf_map",
  487. .result = REJECT,
  488. },
  489. {
  490. "don't check return value before access",
  491. .insns = {
  492. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  493. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  494. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  495. BPF_LD_MAP_FD(BPF_REG_1, 0),
  496. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  497. BPF_FUNC_map_lookup_elem),
  498. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
  499. BPF_EXIT_INSN(),
  500. },
  501. .fixup_map1 = { 3 },
  502. .errstr = "R0 invalid mem access 'map_value_or_null'",
  503. .result = REJECT,
  504. },
  505. {
  506. "access memory with incorrect alignment",
  507. .insns = {
  508. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  509. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  510. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  511. BPF_LD_MAP_FD(BPF_REG_1, 0),
  512. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  513. BPF_FUNC_map_lookup_elem),
  514. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  515. BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
  516. BPF_EXIT_INSN(),
  517. },
  518. .fixup_map1 = { 3 },
  519. .errstr = "misaligned access",
  520. .result = REJECT,
  521. },
  522. {
  523. "sometimes access memory with incorrect alignment",
  524. .insns = {
  525. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  526. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  527. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  528. BPF_LD_MAP_FD(BPF_REG_1, 0),
  529. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  530. BPF_FUNC_map_lookup_elem),
  531. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  532. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
  533. BPF_EXIT_INSN(),
  534. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
  535. BPF_EXIT_INSN(),
  536. },
  537. .fixup_map1 = { 3 },
  538. .errstr = "R0 invalid mem access",
  539. .errstr_unpriv = "R0 leaks addr",
  540. .result = REJECT,
  541. },
  542. {
  543. "jump test 1",
  544. .insns = {
  545. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  546. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
  547. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  548. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  549. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
  550. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
  551. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
  552. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
  553. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
  554. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
  555. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
  556. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
  557. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  558. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
  559. BPF_MOV64_IMM(BPF_REG_0, 0),
  560. BPF_EXIT_INSN(),
  561. },
  562. .errstr_unpriv = "R1 pointer comparison",
  563. .result_unpriv = REJECT,
  564. .result = ACCEPT,
  565. },
  566. {
  567. "jump test 2",
  568. .insns = {
  569. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  570. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
  571. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  572. BPF_JMP_IMM(BPF_JA, 0, 0, 14),
  573. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
  574. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  575. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  576. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
  577. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  578. BPF_JMP_IMM(BPF_JA, 0, 0, 8),
  579. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
  580. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  581. BPF_JMP_IMM(BPF_JA, 0, 0, 5),
  582. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
  583. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  584. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  585. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  586. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  587. BPF_MOV64_IMM(BPF_REG_0, 0),
  588. BPF_EXIT_INSN(),
  589. },
  590. .errstr_unpriv = "R1 pointer comparison",
  591. .result_unpriv = REJECT,
  592. .result = ACCEPT,
  593. },
  594. {
  595. "jump test 3",
  596. .insns = {
  597. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  598. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  599. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  600. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  601. BPF_JMP_IMM(BPF_JA, 0, 0, 19),
  602. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
  603. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  604. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
  605. BPF_JMP_IMM(BPF_JA, 0, 0, 15),
  606. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
  607. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  608. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
  609. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  610. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
  611. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  612. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
  613. BPF_JMP_IMM(BPF_JA, 0, 0, 7),
  614. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
  615. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  616. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
  617. BPF_JMP_IMM(BPF_JA, 0, 0, 3),
  618. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
  619. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  620. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
  621. BPF_LD_MAP_FD(BPF_REG_1, 0),
  622. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  623. BPF_FUNC_map_delete_elem),
  624. BPF_EXIT_INSN(),
  625. },
  626. .fixup_map1 = { 24 },
  627. .errstr_unpriv = "R1 pointer comparison",
  628. .result_unpriv = REJECT,
  629. .result = ACCEPT,
  630. },
  631. {
  632. "jump test 4",
  633. .insns = {
  634. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  635. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  636. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  637. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  638. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  639. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  640. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  641. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  642. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  643. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  644. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  645. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  646. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  647. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  648. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  649. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  650. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  651. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  652. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  653. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  654. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  655. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  656. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  657. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  658. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  659. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  660. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  661. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  662. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  663. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  664. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  665. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  666. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  667. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  668. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  669. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  670. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  671. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  672. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  673. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  674. BPF_MOV64_IMM(BPF_REG_0, 0),
  675. BPF_EXIT_INSN(),
  676. },
  677. .errstr_unpriv = "R1 pointer comparison",
  678. .result_unpriv = REJECT,
  679. .result = ACCEPT,
  680. },
  681. {
  682. "jump test 5",
  683. .insns = {
  684. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  685. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  686. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  687. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  688. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  689. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  690. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  691. BPF_MOV64_IMM(BPF_REG_0, 0),
  692. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  693. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  694. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  695. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  696. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  697. BPF_MOV64_IMM(BPF_REG_0, 0),
  698. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  699. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  700. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  701. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  702. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  703. BPF_MOV64_IMM(BPF_REG_0, 0),
  704. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  705. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  706. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  707. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  708. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  709. BPF_MOV64_IMM(BPF_REG_0, 0),
  710. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  711. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  712. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  713. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  714. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  715. BPF_MOV64_IMM(BPF_REG_0, 0),
  716. BPF_EXIT_INSN(),
  717. },
  718. .errstr_unpriv = "R1 pointer comparison",
  719. .result_unpriv = REJECT,
  720. .result = ACCEPT,
  721. },
  722. {
  723. "access skb fields ok",
  724. .insns = {
  725. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  726. offsetof(struct __sk_buff, len)),
  727. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  728. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  729. offsetof(struct __sk_buff, mark)),
  730. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  731. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  732. offsetof(struct __sk_buff, pkt_type)),
  733. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  734. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  735. offsetof(struct __sk_buff, queue_mapping)),
  736. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  737. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  738. offsetof(struct __sk_buff, protocol)),
  739. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  740. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  741. offsetof(struct __sk_buff, vlan_present)),
  742. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  743. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  744. offsetof(struct __sk_buff, vlan_tci)),
  745. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  746. BPF_EXIT_INSN(),
  747. },
  748. .result = ACCEPT,
  749. },
  750. {
  751. "access skb fields bad1",
  752. .insns = {
  753. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
  754. BPF_EXIT_INSN(),
  755. },
  756. .errstr = "invalid bpf_context access",
  757. .result = REJECT,
  758. },
  759. {
  760. "access skb fields bad2",
  761. .insns = {
  762. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
  763. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  764. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  765. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  766. BPF_LD_MAP_FD(BPF_REG_1, 0),
  767. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  768. BPF_FUNC_map_lookup_elem),
  769. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  770. BPF_EXIT_INSN(),
  771. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  772. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  773. offsetof(struct __sk_buff, pkt_type)),
  774. BPF_EXIT_INSN(),
  775. },
  776. .fixup_map1 = { 4 },
  777. .errstr = "different pointers",
  778. .errstr_unpriv = "R1 pointer comparison",
  779. .result = REJECT,
  780. },
  781. {
  782. "access skb fields bad3",
  783. .insns = {
  784. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  785. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  786. offsetof(struct __sk_buff, pkt_type)),
  787. BPF_EXIT_INSN(),
  788. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  789. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  790. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  791. BPF_LD_MAP_FD(BPF_REG_1, 0),
  792. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  793. BPF_FUNC_map_lookup_elem),
  794. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  795. BPF_EXIT_INSN(),
  796. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  797. BPF_JMP_IMM(BPF_JA, 0, 0, -12),
  798. },
  799. .fixup_map1 = { 6 },
  800. .errstr = "different pointers",
  801. .errstr_unpriv = "R1 pointer comparison",
  802. .result = REJECT,
  803. },
  804. {
  805. "access skb fields bad4",
  806. .insns = {
  807. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
  808. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  809. offsetof(struct __sk_buff, len)),
  810. BPF_MOV64_IMM(BPF_REG_0, 0),
  811. BPF_EXIT_INSN(),
  812. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  813. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  814. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  815. BPF_LD_MAP_FD(BPF_REG_1, 0),
  816. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  817. BPF_FUNC_map_lookup_elem),
  818. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  819. BPF_EXIT_INSN(),
  820. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  821. BPF_JMP_IMM(BPF_JA, 0, 0, -13),
  822. },
  823. .fixup_map1 = { 7 },
  824. .errstr = "different pointers",
  825. .errstr_unpriv = "R1 pointer comparison",
  826. .result = REJECT,
  827. },
  828. {
  829. "check skb->mark is not writeable by sockets",
  830. .insns = {
  831. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  832. offsetof(struct __sk_buff, mark)),
  833. BPF_EXIT_INSN(),
  834. },
  835. .errstr = "invalid bpf_context access",
  836. .errstr_unpriv = "R1 leaks addr",
  837. .result = REJECT,
  838. },
  839. {
  840. "check skb->tc_index is not writeable by sockets",
  841. .insns = {
  842. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  843. offsetof(struct __sk_buff, tc_index)),
  844. BPF_EXIT_INSN(),
  845. },
  846. .errstr = "invalid bpf_context access",
  847. .errstr_unpriv = "R1 leaks addr",
  848. .result = REJECT,
  849. },
  850. {
  851. "check non-u32 access to cb",
  852. .insns = {
  853. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
  854. offsetof(struct __sk_buff, cb[0])),
  855. BPF_EXIT_INSN(),
  856. },
  857. .errstr = "invalid bpf_context access",
  858. .errstr_unpriv = "R1 leaks addr",
  859. .result = REJECT,
  860. },
  861. {
  862. "check out of range skb->cb access",
  863. .insns = {
  864. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  865. offsetof(struct __sk_buff, cb[0]) + 256),
  866. BPF_EXIT_INSN(),
  867. },
  868. .errstr = "invalid bpf_context access",
  869. .errstr_unpriv = "",
  870. .result = REJECT,
  871. .prog_type = BPF_PROG_TYPE_SCHED_ACT,
  872. },
  873. {
  874. "write skb fields from socket prog",
  875. .insns = {
  876. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  877. offsetof(struct __sk_buff, cb[4])),
  878. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  879. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  880. offsetof(struct __sk_buff, mark)),
  881. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  882. offsetof(struct __sk_buff, tc_index)),
  883. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  884. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  885. offsetof(struct __sk_buff, cb[0])),
  886. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  887. offsetof(struct __sk_buff, cb[2])),
  888. BPF_EXIT_INSN(),
  889. },
  890. .result = ACCEPT,
  891. .errstr_unpriv = "R1 leaks addr",
  892. .result_unpriv = REJECT,
  893. },
  894. {
  895. "write skb fields from tc_cls_act prog",
  896. .insns = {
  897. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  898. offsetof(struct __sk_buff, cb[0])),
  899. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  900. offsetof(struct __sk_buff, mark)),
  901. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  902. offsetof(struct __sk_buff, tc_index)),
  903. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  904. offsetof(struct __sk_buff, tc_index)),
  905. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  906. offsetof(struct __sk_buff, cb[3])),
  907. BPF_EXIT_INSN(),
  908. },
  909. .errstr_unpriv = "",
  910. .result_unpriv = REJECT,
  911. .result = ACCEPT,
  912. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  913. },
  914. {
  915. "PTR_TO_STACK store/load",
  916. .insns = {
  917. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  918. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
  919. BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
  920. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
  921. BPF_EXIT_INSN(),
  922. },
  923. .result = ACCEPT,
  924. },
  925. {
  926. "PTR_TO_STACK store/load - bad alignment on off",
  927. .insns = {
  928. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  929. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  930. BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
  931. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
  932. BPF_EXIT_INSN(),
  933. },
  934. .result = REJECT,
  935. .errstr = "misaligned access off -6 size 8",
  936. },
  937. {
  938. "PTR_TO_STACK store/load - bad alignment on reg",
  939. .insns = {
  940. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  941. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
  942. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  943. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  944. BPF_EXIT_INSN(),
  945. },
  946. .result = REJECT,
  947. .errstr = "misaligned access off -2 size 8",
  948. },
  949. {
  950. "PTR_TO_STACK store/load - out of bounds low",
  951. .insns = {
  952. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  953. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
  954. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  955. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  956. BPF_EXIT_INSN(),
  957. },
  958. .result = REJECT,
  959. .errstr = "invalid stack off=-79992 size=8",
  960. },
  961. {
  962. "PTR_TO_STACK store/load - out of bounds high",
  963. .insns = {
  964. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  965. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  966. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  967. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  968. BPF_EXIT_INSN(),
  969. },
  970. .result = REJECT,
  971. .errstr = "invalid stack off=0 size=8",
  972. },
  973. {
  974. "unpriv: return pointer",
  975. .insns = {
  976. BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
  977. BPF_EXIT_INSN(),
  978. },
  979. .result = ACCEPT,
  980. .result_unpriv = REJECT,
  981. .errstr_unpriv = "R0 leaks addr",
  982. },
  983. {
  984. "unpriv: add const to pointer",
  985. .insns = {
  986. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  987. BPF_MOV64_IMM(BPF_REG_0, 0),
  988. BPF_EXIT_INSN(),
  989. },
  990. .result = ACCEPT,
  991. .result_unpriv = REJECT,
  992. .errstr_unpriv = "R1 pointer arithmetic",
  993. },
  994. {
  995. "unpriv: add pointer to pointer",
  996. .insns = {
  997. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
  998. BPF_MOV64_IMM(BPF_REG_0, 0),
  999. BPF_EXIT_INSN(),
  1000. },
  1001. .result = ACCEPT,
  1002. .result_unpriv = REJECT,
  1003. .errstr_unpriv = "R1 pointer arithmetic",
  1004. },
  1005. {
  1006. "unpriv: neg pointer",
  1007. .insns = {
  1008. BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
  1009. BPF_MOV64_IMM(BPF_REG_0, 0),
  1010. BPF_EXIT_INSN(),
  1011. },
  1012. .result = ACCEPT,
  1013. .result_unpriv = REJECT,
  1014. .errstr_unpriv = "R1 pointer arithmetic",
  1015. },
  1016. {
  1017. "unpriv: cmp pointer with const",
  1018. .insns = {
  1019. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
  1020. BPF_MOV64_IMM(BPF_REG_0, 0),
  1021. BPF_EXIT_INSN(),
  1022. },
  1023. .result = ACCEPT,
  1024. .result_unpriv = REJECT,
  1025. .errstr_unpriv = "R1 pointer comparison",
  1026. },
  1027. {
  1028. "unpriv: cmp pointer with pointer",
  1029. .insns = {
  1030. BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  1031. BPF_MOV64_IMM(BPF_REG_0, 0),
  1032. BPF_EXIT_INSN(),
  1033. },
  1034. .result = ACCEPT,
  1035. .result_unpriv = REJECT,
  1036. .errstr_unpriv = "R10 pointer comparison",
  1037. },
  1038. {
  1039. "unpriv: check that printk is disallowed",
  1040. .insns = {
  1041. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1042. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  1043. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  1044. BPF_MOV64_IMM(BPF_REG_2, 8),
  1045. BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
  1046. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1047. BPF_FUNC_trace_printk),
  1048. BPF_MOV64_IMM(BPF_REG_0, 0),
  1049. BPF_EXIT_INSN(),
  1050. },
  1051. .errstr_unpriv = "unknown func bpf_trace_printk#6",
  1052. .result_unpriv = REJECT,
  1053. .result = ACCEPT,
  1054. },
  1055. {
  1056. "unpriv: pass pointer to helper function",
  1057. .insns = {
  1058. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1059. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1060. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1061. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1062. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  1063. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1064. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1065. BPF_FUNC_map_update_elem),
  1066. BPF_MOV64_IMM(BPF_REG_0, 0),
  1067. BPF_EXIT_INSN(),
  1068. },
  1069. .fixup_map1 = { 3 },
  1070. .errstr_unpriv = "R4 leaks addr",
  1071. .result_unpriv = REJECT,
  1072. .result = ACCEPT,
  1073. },
  1074. {
  1075. "unpriv: indirectly pass pointer on stack to helper function",
  1076. .insns = {
  1077. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1078. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1079. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1080. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1081. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1082. BPF_FUNC_map_lookup_elem),
  1083. BPF_MOV64_IMM(BPF_REG_0, 0),
  1084. BPF_EXIT_INSN(),
  1085. },
  1086. .fixup_map1 = { 3 },
  1087. .errstr = "invalid indirect read from stack off -8+0 size 8",
  1088. .result = REJECT,
  1089. },
  1090. {
  1091. "unpriv: mangle pointer on stack 1",
  1092. .insns = {
  1093. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1094. BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
  1095. BPF_MOV64_IMM(BPF_REG_0, 0),
  1096. BPF_EXIT_INSN(),
  1097. },
  1098. .errstr_unpriv = "attempt to corrupt spilled",
  1099. .result_unpriv = REJECT,
  1100. .result = ACCEPT,
  1101. },
  1102. {
  1103. "unpriv: mangle pointer on stack 2",
  1104. .insns = {
  1105. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1106. BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
  1107. BPF_MOV64_IMM(BPF_REG_0, 0),
  1108. BPF_EXIT_INSN(),
  1109. },
  1110. .errstr_unpriv = "attempt to corrupt spilled",
  1111. .result_unpriv = REJECT,
  1112. .result = ACCEPT,
  1113. },
  1114. {
  1115. "unpriv: read pointer from stack in small chunks",
  1116. .insns = {
  1117. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1118. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
  1119. BPF_MOV64_IMM(BPF_REG_0, 0),
  1120. BPF_EXIT_INSN(),
  1121. },
  1122. .errstr = "invalid size",
  1123. .result = REJECT,
  1124. },
  1125. {
  1126. "unpriv: write pointer into ctx",
  1127. .insns = {
  1128. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
  1129. BPF_MOV64_IMM(BPF_REG_0, 0),
  1130. BPF_EXIT_INSN(),
  1131. },
  1132. .errstr_unpriv = "R1 leaks addr",
  1133. .result_unpriv = REJECT,
  1134. .errstr = "invalid bpf_context access",
  1135. .result = REJECT,
  1136. },
  1137. {
  1138. "unpriv: spill/fill of ctx",
  1139. .insns = {
  1140. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1141. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1142. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1143. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1144. BPF_MOV64_IMM(BPF_REG_0, 0),
  1145. BPF_EXIT_INSN(),
  1146. },
  1147. .result = ACCEPT,
  1148. },
  1149. {
  1150. "unpriv: spill/fill of ctx 2",
  1151. .insns = {
  1152. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1153. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1154. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1155. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1156. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1157. BPF_FUNC_get_hash_recalc),
  1158. BPF_EXIT_INSN(),
  1159. },
  1160. .result = ACCEPT,
  1161. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1162. },
  1163. {
  1164. "unpriv: spill/fill of ctx 3",
  1165. .insns = {
  1166. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1167. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1168. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1169. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
  1170. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1171. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1172. BPF_FUNC_get_hash_recalc),
  1173. BPF_EXIT_INSN(),
  1174. },
  1175. .result = REJECT,
  1176. .errstr = "R1 type=fp expected=ctx",
  1177. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1178. },
  1179. {
  1180. "unpriv: spill/fill of ctx 4",
  1181. .insns = {
  1182. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1183. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1184. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1185. BPF_MOV64_IMM(BPF_REG_0, 1),
  1186. BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
  1187. BPF_REG_0, -8, 0),
  1188. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1189. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1190. BPF_FUNC_get_hash_recalc),
  1191. BPF_EXIT_INSN(),
  1192. },
  1193. .result = REJECT,
  1194. .errstr = "R1 type=inv expected=ctx",
  1195. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1196. },
  1197. {
  1198. "unpriv: spill/fill of different pointers stx",
  1199. .insns = {
  1200. BPF_MOV64_IMM(BPF_REG_3, 42),
  1201. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1202. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1203. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  1204. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1205. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
  1206. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
  1207. BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
  1208. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1209. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1210. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
  1211. offsetof(struct __sk_buff, mark)),
  1212. BPF_MOV64_IMM(BPF_REG_0, 0),
  1213. BPF_EXIT_INSN(),
  1214. },
  1215. .result = REJECT,
  1216. .errstr = "same insn cannot be used with different pointers",
  1217. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1218. },
  1219. {
  1220. "unpriv: spill/fill of different pointers ldx",
  1221. .insns = {
  1222. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1223. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1224. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  1225. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1226. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
  1227. -(__s32)offsetof(struct bpf_perf_event_data,
  1228. sample_period) - 8),
  1229. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
  1230. BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
  1231. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1232. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1233. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
  1234. offsetof(struct bpf_perf_event_data,
  1235. sample_period)),
  1236. BPF_MOV64_IMM(BPF_REG_0, 0),
  1237. BPF_EXIT_INSN(),
  1238. },
  1239. .result = REJECT,
  1240. .errstr = "same insn cannot be used with different pointers",
  1241. .prog_type = BPF_PROG_TYPE_PERF_EVENT,
  1242. },
  1243. {
  1244. "unpriv: write pointer into map elem value",
  1245. .insns = {
  1246. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1247. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1248. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1249. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1250. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1251. BPF_FUNC_map_lookup_elem),
  1252. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  1253. BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
  1254. BPF_EXIT_INSN(),
  1255. },
  1256. .fixup_map1 = { 3 },
  1257. .errstr_unpriv = "R0 leaks addr",
  1258. .result_unpriv = REJECT,
  1259. .result = ACCEPT,
  1260. },
  1261. {
  1262. "unpriv: partial copy of pointer",
  1263. .insns = {
  1264. BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
  1265. BPF_MOV64_IMM(BPF_REG_0, 0),
  1266. BPF_EXIT_INSN(),
  1267. },
  1268. .errstr_unpriv = "R10 partial copy",
  1269. .result_unpriv = REJECT,
  1270. .result = ACCEPT,
  1271. },
  1272. {
  1273. "unpriv: pass pointer to tail_call",
  1274. .insns = {
  1275. BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
  1276. BPF_LD_MAP_FD(BPF_REG_2, 0),
  1277. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1278. BPF_FUNC_tail_call),
  1279. BPF_MOV64_IMM(BPF_REG_0, 0),
  1280. BPF_EXIT_INSN(),
  1281. },
  1282. .fixup_prog = { 1 },
  1283. .errstr_unpriv = "R3 leaks addr into helper",
  1284. .result_unpriv = REJECT,
  1285. .result = ACCEPT,
  1286. },
  1287. {
  1288. "unpriv: cmp map pointer with zero",
  1289. .insns = {
  1290. BPF_MOV64_IMM(BPF_REG_1, 0),
  1291. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1292. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
  1293. BPF_MOV64_IMM(BPF_REG_0, 0),
  1294. BPF_EXIT_INSN(),
  1295. },
  1296. .fixup_map1 = { 1 },
  1297. .errstr_unpriv = "R1 pointer comparison",
  1298. .result_unpriv = REJECT,
  1299. .result = ACCEPT,
  1300. },
  1301. {
  1302. "unpriv: write into frame pointer",
  1303. .insns = {
  1304. BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
  1305. BPF_MOV64_IMM(BPF_REG_0, 0),
  1306. BPF_EXIT_INSN(),
  1307. },
  1308. .errstr = "frame pointer is read only",
  1309. .result = REJECT,
  1310. },
  1311. {
  1312. "unpriv: spill/fill frame pointer",
  1313. .insns = {
  1314. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1315. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1316. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
  1317. BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
  1318. BPF_MOV64_IMM(BPF_REG_0, 0),
  1319. BPF_EXIT_INSN(),
  1320. },
  1321. .errstr = "frame pointer is read only",
  1322. .result = REJECT,
  1323. },
  1324. {
  1325. "unpriv: cmp of frame pointer",
  1326. .insns = {
  1327. BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
  1328. BPF_MOV64_IMM(BPF_REG_0, 0),
  1329. BPF_EXIT_INSN(),
  1330. },
  1331. .errstr_unpriv = "R10 pointer comparison",
  1332. .result_unpriv = REJECT,
  1333. .result = ACCEPT,
  1334. },
  1335. {
  1336. "unpriv: cmp of stack pointer",
  1337. .insns = {
  1338. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1339. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1340. BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
  1341. BPF_MOV64_IMM(BPF_REG_0, 0),
  1342. BPF_EXIT_INSN(),
  1343. },
  1344. .errstr_unpriv = "R2 pointer comparison",
  1345. .result_unpriv = REJECT,
  1346. .result = ACCEPT,
  1347. },
  1348. {
  1349. "unpriv: obfuscate stack pointer",
  1350. .insns = {
  1351. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1352. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1353. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1354. BPF_MOV64_IMM(BPF_REG_0, 0),
  1355. BPF_EXIT_INSN(),
  1356. },
  1357. .errstr_unpriv = "R2 pointer arithmetic",
  1358. .result_unpriv = REJECT,
  1359. .result = ACCEPT,
  1360. },
  1361. {
  1362. "raw_stack: no skb_load_bytes",
  1363. .insns = {
  1364. BPF_MOV64_IMM(BPF_REG_2, 4),
  1365. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1366. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1367. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1368. BPF_MOV64_IMM(BPF_REG_4, 8),
  1369. /* Call to skb_load_bytes() omitted. */
  1370. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  1371. BPF_EXIT_INSN(),
  1372. },
  1373. .result = REJECT,
  1374. .errstr = "invalid read from stack off -8+0 size 8",
  1375. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1376. },
  1377. {
  1378. "raw_stack: skb_load_bytes, negative len",
  1379. .insns = {
  1380. BPF_MOV64_IMM(BPF_REG_2, 4),
  1381. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1382. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1383. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1384. BPF_MOV64_IMM(BPF_REG_4, -8),
  1385. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1386. BPF_FUNC_skb_load_bytes),
  1387. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  1388. BPF_EXIT_INSN(),
  1389. },
  1390. .result = REJECT,
  1391. .errstr = "invalid stack type R3",
  1392. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1393. },
  1394. {
  1395. "raw_stack: skb_load_bytes, negative len 2",
  1396. .insns = {
  1397. BPF_MOV64_IMM(BPF_REG_2, 4),
  1398. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1399. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1400. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1401. BPF_MOV64_IMM(BPF_REG_4, ~0),
  1402. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1403. BPF_FUNC_skb_load_bytes),
  1404. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  1405. BPF_EXIT_INSN(),
  1406. },
  1407. .result = REJECT,
  1408. .errstr = "invalid stack type R3",
  1409. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1410. },
  1411. {
  1412. "raw_stack: skb_load_bytes, zero len",
  1413. .insns = {
  1414. BPF_MOV64_IMM(BPF_REG_2, 4),
  1415. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1416. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1417. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1418. BPF_MOV64_IMM(BPF_REG_4, 0),
  1419. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1420. BPF_FUNC_skb_load_bytes),
  1421. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  1422. BPF_EXIT_INSN(),
  1423. },
  1424. .result = REJECT,
  1425. .errstr = "invalid stack type R3",
  1426. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1427. },
  1428. {
  1429. "raw_stack: skb_load_bytes, no init",
  1430. .insns = {
  1431. BPF_MOV64_IMM(BPF_REG_2, 4),
  1432. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1433. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1434. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1435. BPF_MOV64_IMM(BPF_REG_4, 8),
  1436. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1437. BPF_FUNC_skb_load_bytes),
  1438. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  1439. BPF_EXIT_INSN(),
  1440. },
  1441. .result = ACCEPT,
  1442. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1443. },
  1444. {
  1445. "raw_stack: skb_load_bytes, init",
  1446. .insns = {
  1447. BPF_MOV64_IMM(BPF_REG_2, 4),
  1448. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1449. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1450. BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
  1451. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1452. BPF_MOV64_IMM(BPF_REG_4, 8),
  1453. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1454. BPF_FUNC_skb_load_bytes),
  1455. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  1456. BPF_EXIT_INSN(),
  1457. },
  1458. .result = ACCEPT,
  1459. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1460. },
  1461. {
  1462. "raw_stack: skb_load_bytes, spilled regs around bounds",
  1463. .insns = {
  1464. BPF_MOV64_IMM(BPF_REG_2, 4),
  1465. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1466. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
  1467. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
  1468. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
  1469. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1470. BPF_MOV64_IMM(BPF_REG_4, 8),
  1471. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1472. BPF_FUNC_skb_load_bytes),
  1473. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
  1474. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
  1475. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  1476. offsetof(struct __sk_buff, mark)),
  1477. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
  1478. offsetof(struct __sk_buff, priority)),
  1479. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
  1480. BPF_EXIT_INSN(),
  1481. },
  1482. .result = ACCEPT,
  1483. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1484. },
  1485. {
  1486. "raw_stack: skb_load_bytes, spilled regs corruption",
  1487. .insns = {
  1488. BPF_MOV64_IMM(BPF_REG_2, 4),
  1489. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1490. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1491. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1492. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1493. BPF_MOV64_IMM(BPF_REG_4, 8),
  1494. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1495. BPF_FUNC_skb_load_bytes),
  1496. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  1497. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  1498. offsetof(struct __sk_buff, mark)),
  1499. BPF_EXIT_INSN(),
  1500. },
  1501. .result = REJECT,
  1502. .errstr = "R0 invalid mem access 'inv'",
  1503. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1504. },
  1505. {
  1506. "raw_stack: skb_load_bytes, spilled regs corruption 2",
  1507. .insns = {
  1508. BPF_MOV64_IMM(BPF_REG_2, 4),
  1509. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1510. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
  1511. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
  1512. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1513. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
  1514. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1515. BPF_MOV64_IMM(BPF_REG_4, 8),
  1516. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1517. BPF_FUNC_skb_load_bytes),
  1518. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
  1519. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
  1520. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
  1521. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  1522. offsetof(struct __sk_buff, mark)),
  1523. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
  1524. offsetof(struct __sk_buff, priority)),
  1525. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
  1526. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
  1527. offsetof(struct __sk_buff, pkt_type)),
  1528. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
  1529. BPF_EXIT_INSN(),
  1530. },
  1531. .result = REJECT,
  1532. .errstr = "R3 invalid mem access 'inv'",
  1533. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1534. },
  1535. {
  1536. "raw_stack: skb_load_bytes, spilled regs + data",
  1537. .insns = {
  1538. BPF_MOV64_IMM(BPF_REG_2, 4),
  1539. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1540. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
  1541. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
  1542. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1543. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
  1544. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  1545. BPF_MOV64_IMM(BPF_REG_4, 8),
  1546. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1547. BPF_FUNC_skb_load_bytes),
  1548. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
  1549. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
  1550. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
  1551. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  1552. offsetof(struct __sk_buff, mark)),
  1553. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
  1554. offsetof(struct __sk_buff, priority)),
  1555. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
  1556. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
  1557. BPF_EXIT_INSN(),
  1558. },
  1559. .result = ACCEPT,
  1560. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1561. },
	{
		/* R3 = fp-513 is one byte below the 512-byte stack window,
		 * so the helper's write through R3 must be rejected.
		 */
		"raw_stack: skb_load_bytes, invalid access 1",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_2, 4),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_4, 8),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_skb_load_bytes),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-513 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* R3 = fp-1 with an 8-byte access would run past the frame
		 * pointer: rejected.
		 */
		"raw_stack: skb_load_bytes, invalid access 2",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_2, 4),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_4, 8),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_skb_load_bytes),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* 0xffffffff is a 32-bit immediate, sign-extended to -1 for
		 * both the stack offset and the length; per the errstr the
		 * verifier sees off=-1 access_size=-1 and rejects the
		 * negative size.
		 */
		"raw_stack: skb_load_bytes, invalid access 3",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_2, 4),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_skb_load_bytes),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=-1",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* INT_MAX length starting at fp-1 cannot fit in the stack. */
		"raw_stack: skb_load_bytes, invalid access 4",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_2, 4),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_skb_load_bytes),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* INT_MAX length does not fit even from the lowest slot
		 * fp-512.
		 */
		"raw_stack: skb_load_bytes, invalid access 5",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_2, 4),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_skb_load_bytes),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Zero-length access at fp-512 is rejected as well. */
		"raw_stack: skb_load_bytes, invalid access 6",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_2, 4),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_skb_load_bytes),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=0",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* A full 512-byte fill from fp-512 covers exactly the whole
		 * stack window and is accepted.
		 */
		"raw_stack: skb_load_bytes, large access",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_2, 4),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_4, 512),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_skb_load_bytes),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Classic pattern: a byte load from pkt data is allowed only
		 * on the path where data + 8 <= data_end was proven.
		 */
		"direct packet access: test1",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Builds a variable packet offset (byte loaded from the
		 * packet, scaled, plus low 16 bits of another register) and
		 * re-checks the new pointer against data_end before the
		 * final load.
		 */
		"direct packet access: test2",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
		BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
		BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Socket filters may not read skb->data from the context:
		 * rejected as a bad ctx access at offset 76.
		 */
		"direct packet access: test3",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access off=76",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	},
	{
		/* A byte store into the packet is fine once the range is
		 * proven.
		 */
		"direct packet access: test4 (write)",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
		BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Same check written as pkt_end >= reg; the load sits on the
		 * branch-taken (proven) path.
		 */
		"direct packet access: test5 (pkt_end >= reg, good access)",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Here the load is on the fall-through (unproven) path, so
		 * it must be rejected.
		 */
		"direct packet access: test6 (pkt_end >= reg, bad access)",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid access to packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Loads on both branch targets; the one on the unproven path
		 * makes the program invalid.
		 */
		"direct packet access: test7 (pkt_end >= reg, both accesses)",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid access to packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Two redundant bounds checks (JGE then JGT on the same
		 * pointers); both loads end up on proven paths.
		 */
		"direct packet access: test8 (double test, variant 1)",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
		BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Variant of test8 with the second check after an early
		 * exit; still accepted.
		 */
		"direct packet access: test9 (double test, variant 2)",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* The store is only reachable on the path where the bounds
		 * check failed: rejected.
		 */
		"direct packet access: test10 (write invalid)",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid access to packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* XDP: packet pointer with a verified 8-byte range may be
		 * passed as the value argument of map_update_elem.
		 */
		"helper access to packet: test1, valid packet_ptr range",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_update_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 5 },
		.result_unpriv = ACCEPT,
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Packet pointer handed to a helper without any bounds
		 * check: rejected.
		 */
		"helper access to packet: test2, unchecked packet_ptr",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 1 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Pointer advanced by a value read from the packet itself;
		 * a second bounds check on the derived pointer makes the
		 * helper call legal.
		 */
		"helper access to packet: test3, variable add",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
		BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 11 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Only 4 bytes were proven available, then the helper is
		 * called on the path where even that check failed: rejected.
		 */
		"helper access to packet: test4, packet_ptr with bad range",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 7 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Pointer bumped to data+1 before the check, so the proven
		 * range from R2 is one byte short of the key size: rejected.
		 */
		"helper access to packet: test5, packet_ptr with too short range",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 6 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Same scenario as XDP test1, but for SCHED_CLS: a checked
		 * 8-byte packet range is a valid map_update_elem value.
		 */
		"helper access to packet: test6, cls valid packet_ptr range",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_update_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 5 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* cls mirror of XDP test2: unchecked packet pointer passed
		 * to a helper is rejected.
		 */
		"helper access to packet: test7, cls unchecked packet_ptr",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 1 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* cls mirror of XDP test3: variable offset re-checked before
		 * the helper call, accepted.
		 */
		"helper access to packet: test8, cls variable add",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
		BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 11 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* cls mirror of XDP test4: helper called on the path where
		 * the bounds check failed, rejected.
		 */
		"helper access to packet: test9, cls packet_ptr with bad range",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 7 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* cls mirror of XDP test5: verified range is one byte too
		 * short for the helper, rejected.
		 */
		"helper access to packet: test10, cls packet_ptr with too short range",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 6 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* skb_store_bytes may not take a packet pointer as its value
		 * argument even when the range was checked: rejected with
		 * "helper access to the packet".
		 */
		"helper access to packet: test11, cls unsuitable helper 1",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
		BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_4, 42),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_skb_store_bytes),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "helper access to the packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Likewise skb_load_bytes may not write through a packet
		 * pointer destination.
		 */
		"helper access to packet: test12, cls unsuitable helper 2",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_4, 4),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_skb_load_bytes),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "helper access to the packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* csum_diff is allowed to read the packet: len 4 fits inside
		 * the verified range, accepted.
		 */
		"helper access to packet: test13, cls helper ok",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_2, 4),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_csum_diff),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Subtracting from the checked pointer turns it into an
		 * unknown scalar for the helper: rejected with
		 * "type=inv expected=fp".
		 */
		"helper access to packet: test14, cls helper fail sub",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
		BPF_MOV64_IMM(BPF_REG_2, 4),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_csum_diff),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "type=inv expected=fp",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* len 8 exceeds the range proven from R6: rejected. */
		"helper access to packet: test15, cls helper fail range 1",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_2, 8),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_csum_diff),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Negative length (-9): rejected. */
		"helper access to packet: test16, cls helper fail range 2",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_2, -9),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_csum_diff),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Length ~0 (i.e. -1): rejected. */
		"helper access to packet: test17, cls helper fail range 3",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_2, ~0),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_csum_diff),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Zero length on a packet pointer: rejected. */
		"helper access to packet: test18, cls helper fail range zero",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_csum_diff),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* data_end (pkt_end) itself passed as the buffer argument:
		 * rejected with a type mismatch.
		 */
		"helper access to packet: test19, pkt end as input",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
		BPF_MOV64_IMM(BPF_REG_2, 4),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_csum_diff),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R1 type=pkt_end expected=fp",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* R1 is never reset to R6 after the check, so it still holds
		 * the upper-bound pointer with no usable range: rejected.
		 */
		"helper access to packet: test20, wrong reg",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
		BPF_MOV64_IMM(BPF_REG_2, 4),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_csum_diff),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Store at a constant offset into the looked-up value;
		 * legal, but unpriv mode rejects the leaked map address.
		 */
		"valid map access into an array with a constant",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
			   offsetof(struct test_val, foo)),
		BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 leaks addr",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Offset comes from a register holding the known constant
		 * 4 << 2 = 16, which is inside the value; unpriv rejects the
		 * pointer arithmetic.
		 */
		"valid map access into an array with a register",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_MOV64_IMM(BPF_REG_1, 4),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
			   offsetof(struct test_val, foo)),
		BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Index loaded from the value itself, upper-bounded by the
		 * JGE MAX_ENTRIES check before scaling: accepted.
		 */
		"valid map access into an array with a variable",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
			   offsetof(struct test_val, foo)),
		BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Signed index bounded on both sides (JSGT against
		 * 0xffffffff, then JSGT MAX_ENTRIES > index) before scaling:
		 * accepted.
		 */
		"valid map access into an array with a signed variable",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
		BPF_MOV32_IMM(BPF_REG_1, 0),
		BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
		BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
		BPF_MOV32_IMM(BPF_REG_1, 0),
		BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
			   offsetof(struct test_val, foo)),
		BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Constant offset (MAX_ENTRIES + 1) << 2 = 48 is exactly one
		 * element past the end of the 48-byte value: rejected.
		 */
		"invalid map access into an array with a constant",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
			   offsetof(struct test_val, foo)),
		BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
		.result = REJECT,
	},
	{
		/* Register holds the known out-of-range constant
		 * MAX_ENTRIES + 1 before scaling: rejected.
		 */
		"invalid map access into an array with a register",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
			   offsetof(struct test_val, foo)),
		BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is outside of the array range",
		.result_unpriv = REJECT,
		.result = REJECT,
	},
	{
		/* Index read from the value is used completely unbounded —
		 * no lower or upper check — so its min value may be
		 * negative: rejected.
		 */
		"invalid map access into an array with a variable",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
			   offsetof(struct test_val, foo)),
		BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
		.result_unpriv = REJECT,
		.result = REJECT,
	},
  2499. {
  2500. "invalid map access into an array with no floor check",
  2501. .insns = {
  2502. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2503. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2504. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2505. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2506. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2507. BPF_FUNC_map_lookup_elem),
  2508. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  2509. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  2510. BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
  2511. BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
  2512. BPF_MOV32_IMM(BPF_REG_1, 0),
  2513. BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
  2514. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  2515. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  2516. offsetof(struct test_val, foo)),
  2517. BPF_EXIT_INSN(),
  2518. },
  2519. .fixup_map2 = { 3 },
  2520. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  2521. .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
  2522. .result_unpriv = REJECT,
  2523. .result = REJECT,
  2524. },
  2525. {
  2526. "invalid map access into an array with a invalid max check",
  2527. .insns = {
  2528. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2529. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2530. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2531. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2532. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2533. BPF_FUNC_map_lookup_elem),
  2534. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  2535. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  2536. BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
  2537. BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
  2538. BPF_MOV32_IMM(BPF_REG_1, 0),
  2539. BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
  2540. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  2541. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  2542. offsetof(struct test_val, foo)),
  2543. BPF_EXIT_INSN(),
  2544. },
  2545. .fixup_map2 = { 3 },
  2546. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  2547. .errstr = "invalid access to map value, value_size=48 off=44 size=8",
  2548. .result_unpriv = REJECT,
  2549. .result = REJECT,
  2550. },
  2551. {
  2552. "invalid map access into an array with a invalid max check",
  2553. .insns = {
  2554. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2555. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2556. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2557. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2558. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2559. BPF_FUNC_map_lookup_elem),
  2560. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
  2561. BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
  2562. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2563. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2564. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2565. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2566. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2567. BPF_FUNC_map_lookup_elem),
  2568. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  2569. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
  2570. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  2571. offsetof(struct test_val, foo)),
  2572. BPF_EXIT_INSN(),
  2573. },
  2574. .fixup_map2 = { 3, 11 },
  2575. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  2576. .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
  2577. .result_unpriv = REJECT,
  2578. .result = REJECT,
  2579. },
  2580. {
  2581. "multiple registers share map_lookup_elem result",
  2582. .insns = {
  2583. BPF_MOV64_IMM(BPF_REG_1, 10),
  2584. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  2585. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2586. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2587. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2588. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2589. BPF_FUNC_map_lookup_elem),
  2590. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  2591. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  2592. BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
  2593. BPF_EXIT_INSN(),
  2594. },
  2595. .fixup_map1 = { 4 },
  2596. .result = ACCEPT,
  2597. .prog_type = BPF_PROG_TYPE_SCHED_CLS
  2598. },
  2599. {
  2600. "invalid memory access with multiple map_lookup_elem calls",
  2601. .insns = {
  2602. BPF_MOV64_IMM(BPF_REG_1, 10),
  2603. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  2604. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2605. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2606. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2607. BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
  2608. BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
  2609. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2610. BPF_FUNC_map_lookup_elem),
  2611. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  2612. BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
  2613. BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
  2614. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2615. BPF_FUNC_map_lookup_elem),
  2616. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  2617. BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
  2618. BPF_EXIT_INSN(),
  2619. },
  2620. .fixup_map1 = { 4 },
  2621. .result = REJECT,
  2622. .errstr = "R4 !read_ok",
  2623. .prog_type = BPF_PROG_TYPE_SCHED_CLS
  2624. },
  2625. {
  2626. "valid indirect map_lookup_elem access with 2nd lookup in branch",
  2627. .insns = {
  2628. BPF_MOV64_IMM(BPF_REG_1, 10),
  2629. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  2630. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2631. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2632. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2633. BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
  2634. BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
  2635. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2636. BPF_FUNC_map_lookup_elem),
  2637. BPF_MOV64_IMM(BPF_REG_2, 10),
  2638. BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
  2639. BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
  2640. BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
  2641. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2642. BPF_FUNC_map_lookup_elem),
  2643. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  2644. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  2645. BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
  2646. BPF_EXIT_INSN(),
  2647. },
  2648. .fixup_map1 = { 4 },
  2649. .result = ACCEPT,
  2650. .prog_type = BPF_PROG_TYPE_SCHED_CLS
  2651. },
  2652. {
  2653. "multiple registers share map_lookup_elem bad reg type",
  2654. .insns = {
  2655. BPF_MOV64_IMM(BPF_REG_1, 10),
  2656. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  2657. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2658. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2659. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2660. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2661. BPF_FUNC_map_lookup_elem),
  2662. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  2663. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  2664. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  2665. BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
  2666. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  2667. BPF_MOV64_IMM(BPF_REG_1, 1),
  2668. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  2669. BPF_MOV64_IMM(BPF_REG_1, 2),
  2670. BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
  2671. BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
  2672. BPF_MOV64_IMM(BPF_REG_1, 3),
  2673. BPF_EXIT_INSN(),
  2674. },
  2675. .fixup_map1 = { 4 },
  2676. .result = REJECT,
  2677. .errstr = "R3 invalid mem access 'inv'",
  2678. .prog_type = BPF_PROG_TYPE_SCHED_CLS
  2679. },
  2680. {
  2681. "invalid map access from else condition",
  2682. .insns = {
  2683. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2684. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2685. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2686. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2687. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  2688. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  2689. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  2690. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
  2691. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
  2692. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  2693. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  2694. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
  2695. BPF_EXIT_INSN(),
  2696. },
  2697. .fixup_map2 = { 3 },
  2698. .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
  2699. .result = REJECT,
  2700. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  2701. .result_unpriv = REJECT,
  2702. },
  2703. {
  2704. "constant register |= constant should keep constant type",
  2705. .insns = {
  2706. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  2707. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
  2708. BPF_MOV64_IMM(BPF_REG_2, 34),
  2709. BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
  2710. BPF_MOV64_IMM(BPF_REG_3, 0),
  2711. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  2712. BPF_EXIT_INSN(),
  2713. },
  2714. .result = ACCEPT,
  2715. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  2716. },
  2717. {
  2718. "constant register |= constant should not bypass stack boundary checks",
  2719. .insns = {
  2720. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  2721. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
  2722. BPF_MOV64_IMM(BPF_REG_2, 34),
  2723. BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
  2724. BPF_MOV64_IMM(BPF_REG_3, 0),
  2725. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  2726. BPF_EXIT_INSN(),
  2727. },
  2728. .errstr = "invalid stack type R1 off=-48 access_size=58",
  2729. .result = REJECT,
  2730. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  2731. },
  2732. {
  2733. "constant register |= constant register should keep constant type",
  2734. .insns = {
  2735. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  2736. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
  2737. BPF_MOV64_IMM(BPF_REG_2, 34),
  2738. BPF_MOV64_IMM(BPF_REG_4, 13),
  2739. BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
  2740. BPF_MOV64_IMM(BPF_REG_3, 0),
  2741. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  2742. BPF_EXIT_INSN(),
  2743. },
  2744. .result = ACCEPT,
  2745. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  2746. },
  2747. {
  2748. "constant register |= constant register should not bypass stack boundary checks",
  2749. .insns = {
  2750. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  2751. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
  2752. BPF_MOV64_IMM(BPF_REG_2, 34),
  2753. BPF_MOV64_IMM(BPF_REG_4, 24),
  2754. BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
  2755. BPF_MOV64_IMM(BPF_REG_3, 0),
  2756. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  2757. BPF_EXIT_INSN(),
  2758. },
  2759. .errstr = "invalid stack type R1 off=-48 access_size=58",
  2760. .result = REJECT,
  2761. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  2762. },
  2763. {
  2764. "invalid direct packet write for LWT_IN",
  2765. .insns = {
  2766. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2767. offsetof(struct __sk_buff, data)),
  2768. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2769. offsetof(struct __sk_buff, data_end)),
  2770. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2771. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2772. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  2773. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  2774. BPF_MOV64_IMM(BPF_REG_0, 0),
  2775. BPF_EXIT_INSN(),
  2776. },
  2777. .errstr = "cannot write into packet",
  2778. .result = REJECT,
  2779. .prog_type = BPF_PROG_TYPE_LWT_IN,
  2780. },
  2781. {
  2782. "invalid direct packet write for LWT_OUT",
  2783. .insns = {
  2784. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2785. offsetof(struct __sk_buff, data)),
  2786. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2787. offsetof(struct __sk_buff, data_end)),
  2788. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2789. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2790. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  2791. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  2792. BPF_MOV64_IMM(BPF_REG_0, 0),
  2793. BPF_EXIT_INSN(),
  2794. },
  2795. .errstr = "cannot write into packet",
  2796. .result = REJECT,
  2797. .prog_type = BPF_PROG_TYPE_LWT_OUT,
  2798. },
  2799. {
  2800. "direct packet write for LWT_XMIT",
  2801. .insns = {
  2802. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2803. offsetof(struct __sk_buff, data)),
  2804. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2805. offsetof(struct __sk_buff, data_end)),
  2806. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2807. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2808. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  2809. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  2810. BPF_MOV64_IMM(BPF_REG_0, 0),
  2811. BPF_EXIT_INSN(),
  2812. },
  2813. .result = ACCEPT,
  2814. .prog_type = BPF_PROG_TYPE_LWT_XMIT,
  2815. },
  2816. {
  2817. "direct packet read for LWT_IN",
  2818. .insns = {
  2819. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2820. offsetof(struct __sk_buff, data)),
  2821. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2822. offsetof(struct __sk_buff, data_end)),
  2823. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2824. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2825. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  2826. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2827. BPF_MOV64_IMM(BPF_REG_0, 0),
  2828. BPF_EXIT_INSN(),
  2829. },
  2830. .result = ACCEPT,
  2831. .prog_type = BPF_PROG_TYPE_LWT_IN,
  2832. },
  2833. {
  2834. "direct packet read for LWT_OUT",
  2835. .insns = {
  2836. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2837. offsetof(struct __sk_buff, data)),
  2838. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2839. offsetof(struct __sk_buff, data_end)),
  2840. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2841. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2842. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  2843. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2844. BPF_MOV64_IMM(BPF_REG_0, 0),
  2845. BPF_EXIT_INSN(),
  2846. },
  2847. .result = ACCEPT,
  2848. .prog_type = BPF_PROG_TYPE_LWT_OUT,
  2849. },
  2850. {
  2851. "direct packet read for LWT_XMIT",
  2852. .insns = {
  2853. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2854. offsetof(struct __sk_buff, data)),
  2855. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2856. offsetof(struct __sk_buff, data_end)),
  2857. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2858. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2859. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  2860. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2861. BPF_MOV64_IMM(BPF_REG_0, 0),
  2862. BPF_EXIT_INSN(),
  2863. },
  2864. .result = ACCEPT,
  2865. .prog_type = BPF_PROG_TYPE_LWT_XMIT,
  2866. },
  2867. {
  2868. "invalid access of tc_classid for LWT_IN",
  2869. .insns = {
  2870. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2871. offsetof(struct __sk_buff, tc_classid)),
  2872. BPF_EXIT_INSN(),
  2873. },
  2874. .result = REJECT,
  2875. .errstr = "invalid bpf_context access",
  2876. },
  2877. {
  2878. "invalid access of tc_classid for LWT_OUT",
  2879. .insns = {
  2880. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2881. offsetof(struct __sk_buff, tc_classid)),
  2882. BPF_EXIT_INSN(),
  2883. },
  2884. .result = REJECT,
  2885. .errstr = "invalid bpf_context access",
  2886. },
  2887. {
  2888. "invalid access of tc_classid for LWT_XMIT",
  2889. .insns = {
  2890. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2891. offsetof(struct __sk_buff, tc_classid)),
  2892. BPF_EXIT_INSN(),
  2893. },
  2894. .result = REJECT,
  2895. .errstr = "invalid bpf_context access",
  2896. },
  2897. };
  2898. static int probe_filter_length(const struct bpf_insn *fp)
  2899. {
  2900. int len;
  2901. for (len = MAX_INSNS - 1; len > 0; --len)
  2902. if (fp[len].code != 0 || fp[len].imm != 0)
  2903. break;
  2904. return len + 1;
  2905. }
  2906. static int create_map(uint32_t size_value, uint32_t max_elem)
  2907. {
  2908. int fd;
  2909. fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(long long),
  2910. size_value, max_elem, BPF_F_NO_PREALLOC);
  2911. if (fd < 0)
  2912. printf("Failed to create hash map '%s'!\n", strerror(errno));
  2913. return fd;
  2914. }
  2915. static int create_prog_array(void)
  2916. {
  2917. int fd;
  2918. fd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
  2919. sizeof(int), 4, 0);
  2920. if (fd < 0)
  2921. printf("Failed to create prog array '%s'!\n", strerror(errno));
  2922. return fd;
  2923. }
/* Buffer receiving the verifier's log output for each program load attempt;
 * dumped on test failure for diagnosis.
 */
static char bpf_vlog[32768];
  2925. static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
  2926. int *fd_f1, int *fd_f2, int *fd_f3)
  2927. {
  2928. int *fixup_map1 = test->fixup_map1;
  2929. int *fixup_map2 = test->fixup_map2;
  2930. int *fixup_prog = test->fixup_prog;
  2931. /* Allocating HTs with 1 elem is fine here, since we only test
  2932. * for verifier and not do a runtime lookup, so the only thing
  2933. * that really matters is value size in this case.
  2934. */
  2935. if (*fixup_map1) {
  2936. *fd_f1 = create_map(sizeof(long long), 1);
  2937. do {
  2938. prog[*fixup_map1].imm = *fd_f1;
  2939. fixup_map1++;
  2940. } while (*fixup_map1);
  2941. }
  2942. if (*fixup_map2) {
  2943. *fd_f2 = create_map(sizeof(struct test_val), 1);
  2944. do {
  2945. prog[*fixup_map2].imm = *fd_f2;
  2946. fixup_map2++;
  2947. } while (*fixup_map2);
  2948. }
  2949. if (*fixup_prog) {
  2950. *fd_f3 = create_prog_array();
  2951. do {
  2952. prog[*fixup_prog].imm = *fd_f3;
  2953. fixup_prog++;
  2954. } while (*fixup_prog);
  2955. }
  2956. }
  2957. static void do_test_single(struct bpf_test *test, bool unpriv,
  2958. int *passes, int *errors)
  2959. {
  2960. struct bpf_insn *prog = test->insns;
  2961. int prog_len = probe_filter_length(prog);
  2962. int prog_type = test->prog_type;
  2963. int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
  2964. int fd_prog, expected_ret;
  2965. const char *expected_err;
  2966. do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
  2967. fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
  2968. prog, prog_len * sizeof(struct bpf_insn),
  2969. "GPL", bpf_vlog, sizeof(bpf_vlog));
  2970. expected_ret = unpriv && test->result_unpriv != UNDEF ?
  2971. test->result_unpriv : test->result;
  2972. expected_err = unpriv && test->errstr_unpriv ?
  2973. test->errstr_unpriv : test->errstr;
  2974. if (expected_ret == ACCEPT) {
  2975. if (fd_prog < 0) {
  2976. printf("FAIL\nFailed to load prog '%s'!\n",
  2977. strerror(errno));
  2978. goto fail_log;
  2979. }
  2980. } else {
  2981. if (fd_prog >= 0) {
  2982. printf("FAIL\nUnexpected success to load!\n");
  2983. goto fail_log;
  2984. }
  2985. if (!strstr(bpf_vlog, expected_err)) {
  2986. printf("FAIL\nUnexpected error message!\n");
  2987. goto fail_log;
  2988. }
  2989. }
  2990. (*passes)++;
  2991. printf("OK\n");
  2992. close_fds:
  2993. close(fd_prog);
  2994. close(fd_f1);
  2995. close(fd_f2);
  2996. close(fd_f3);
  2997. sched_yield();
  2998. return;
  2999. fail_log:
  3000. (*errors)++;
  3001. printf("%s", bpf_vlog);
  3002. goto close_fds;
  3003. }
  3004. static int do_test(bool unpriv, unsigned int from, unsigned int to)
  3005. {
  3006. int i, passes = 0, errors = 0;
  3007. for (i = from; i < to; i++) {
  3008. struct bpf_test *test = &tests[i];
  3009. /* Program types that are not supported by non-root we
  3010. * skip right away.
  3011. */
  3012. if (unpriv && test->prog_type)
  3013. continue;
  3014. printf("#%d %s ", i, test->descr);
  3015. do_test_single(test, unpriv, &passes, &errors);
  3016. }
  3017. printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
  3018. return errors ? -errors : 0;
  3019. }
  3020. int main(int argc, char **argv)
  3021. {
  3022. struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
  3023. struct rlimit rlim = { 1 << 20, 1 << 20 };
  3024. unsigned int from = 0, to = ARRAY_SIZE(tests);
  3025. bool unpriv = geteuid() != 0;
  3026. if (argc == 3) {
  3027. unsigned int l = atoi(argv[argc - 2]);
  3028. unsigned int u = atoi(argv[argc - 1]);
  3029. if (l < to && u < to) {
  3030. from = l;
  3031. to = u + 1;
  3032. }
  3033. } else if (argc == 2) {
  3034. unsigned int t = atoi(argv[argc - 1]);
  3035. if (t < to) {
  3036. from = t;
  3037. to = t + 1;
  3038. }
  3039. }
  3040. setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
  3041. return do_test(unpriv, from, to);
  3042. }