/* test_verifier.c — eBPF verifier testsuite (scrape artifact: the source
 * viewer's line-number gutter was captured as runs of digits here and has
 * been removed; it was not part of the original file's content).
 */
  1. /*
  2. * Testsuite for eBPF verifier
  3. *
  4. * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. */
  10. #include <stdint.h>
  11. #include <stdio.h>
  12. #include <stdlib.h>
  13. #include <unistd.h>
  14. #include <errno.h>
  15. #include <string.h>
  16. #include <stddef.h>
  17. #include <stdbool.h>
  18. #include <sched.h>
  19. #include <sys/capability.h>
  20. #include <sys/resource.h>
  21. #include <linux/unistd.h>
  22. #include <linux/filter.h>
  23. #include <linux/bpf_perf_event.h>
  24. #include <linux/bpf.h>
  25. #include <bpf/bpf.h>
  26. #include "../../../include/linux/filter.h"
#ifndef ARRAY_SIZE
/* Element count of a true array (do not apply to pointers/parameters). */
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512	/* max BPF instructions per test program */
#define MAX_FIXUPS	8	/* max fixup slots per fixup array in struct bpf_test */
/*
 * One verifier test case: a candidate BPF program together with the
 * verdict (and verifier-log substring) expected from the kernel when
 * the program is loaded, both privileged and unprivileged.
 */
struct bpf_test {
	const char *descr;			/* human-readable test name */
	struct bpf_insn insns[MAX_INSNS];	/* program under test */
	/* Instruction indices to be patched before load — presumably the
	 * BPF_LD_MAP_FD slots get a real map/prog-array fd substituted by
	 * the test runner (runner not visible in this chunk; confirm).
	 */
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	const char *errstr;			/* expected verifier-log substring */
	const char *errstr_unpriv;		/* expected substring when loaded unprivileged */
	enum {
		UNDEF,				/* zero value: verdict not specified */
		ACCEPT,				/* verifier must accept the program */
		REJECT				/* verifier must reject with errstr */
	} result, result_unpriv;
	enum bpf_prog_type prog_type;		/* program type to load as (0 = default) */
};
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

/* Sample map-value layout used by the map-access tests: 4-byte index plus
 * 11 ints = 48 bytes total, a multiple of 8 per the note above.
 */
struct test_val {
	unsigned int index;	/* selector into foo[] */
	int foo[MAX_ENTRIES];
};
  55. static struct bpf_test tests[] = {
  56. {
  57. "add+sub+mul",
  58. .insns = {
  59. BPF_MOV64_IMM(BPF_REG_1, 1),
  60. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
  61. BPF_MOV64_IMM(BPF_REG_2, 3),
  62. BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
  63. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
  64. BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
  65. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  66. BPF_EXIT_INSN(),
  67. },
  68. .result = ACCEPT,
  69. },
  70. {
  71. "unreachable",
  72. .insns = {
  73. BPF_EXIT_INSN(),
  74. BPF_EXIT_INSN(),
  75. },
  76. .errstr = "unreachable",
  77. .result = REJECT,
  78. },
  79. {
  80. "unreachable2",
  81. .insns = {
  82. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  83. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  84. BPF_EXIT_INSN(),
  85. },
  86. .errstr = "unreachable",
  87. .result = REJECT,
  88. },
  89. {
  90. "out of range jump",
  91. .insns = {
  92. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  93. BPF_EXIT_INSN(),
  94. },
  95. .errstr = "jump out of range",
  96. .result = REJECT,
  97. },
  98. {
  99. "out of range jump2",
  100. .insns = {
  101. BPF_JMP_IMM(BPF_JA, 0, 0, -2),
  102. BPF_EXIT_INSN(),
  103. },
  104. .errstr = "jump out of range",
  105. .result = REJECT,
  106. },
  107. {
  108. "test1 ld_imm64",
  109. .insns = {
  110. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  111. BPF_LD_IMM64(BPF_REG_0, 0),
  112. BPF_LD_IMM64(BPF_REG_0, 0),
  113. BPF_LD_IMM64(BPF_REG_0, 1),
  114. BPF_LD_IMM64(BPF_REG_0, 1),
  115. BPF_MOV64_IMM(BPF_REG_0, 2),
  116. BPF_EXIT_INSN(),
  117. },
  118. .errstr = "invalid BPF_LD_IMM insn",
  119. .errstr_unpriv = "R1 pointer comparison",
  120. .result = REJECT,
  121. },
  122. {
  123. "test2 ld_imm64",
  124. .insns = {
  125. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  126. BPF_LD_IMM64(BPF_REG_0, 0),
  127. BPF_LD_IMM64(BPF_REG_0, 0),
  128. BPF_LD_IMM64(BPF_REG_0, 1),
  129. BPF_LD_IMM64(BPF_REG_0, 1),
  130. BPF_EXIT_INSN(),
  131. },
  132. .errstr = "invalid BPF_LD_IMM insn",
  133. .errstr_unpriv = "R1 pointer comparison",
  134. .result = REJECT,
  135. },
  136. {
  137. "test3 ld_imm64",
  138. .insns = {
  139. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  140. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  141. BPF_LD_IMM64(BPF_REG_0, 0),
  142. BPF_LD_IMM64(BPF_REG_0, 0),
  143. BPF_LD_IMM64(BPF_REG_0, 1),
  144. BPF_LD_IMM64(BPF_REG_0, 1),
  145. BPF_EXIT_INSN(),
  146. },
  147. .errstr = "invalid bpf_ld_imm64 insn",
  148. .result = REJECT,
  149. },
  150. {
  151. "test4 ld_imm64",
  152. .insns = {
  153. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  154. BPF_EXIT_INSN(),
  155. },
  156. .errstr = "invalid bpf_ld_imm64 insn",
  157. .result = REJECT,
  158. },
  159. {
  160. "test5 ld_imm64",
  161. .insns = {
  162. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  163. },
  164. .errstr = "invalid bpf_ld_imm64 insn",
  165. .result = REJECT,
  166. },
  167. {
  168. "no bpf_exit",
  169. .insns = {
  170. BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
  171. },
  172. .errstr = "jump out of range",
  173. .result = REJECT,
  174. },
  175. {
  176. "loop (back-edge)",
  177. .insns = {
  178. BPF_JMP_IMM(BPF_JA, 0, 0, -1),
  179. BPF_EXIT_INSN(),
  180. },
  181. .errstr = "back-edge",
  182. .result = REJECT,
  183. },
  184. {
  185. "loop2 (back-edge)",
  186. .insns = {
  187. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  188. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  189. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  190. BPF_JMP_IMM(BPF_JA, 0, 0, -4),
  191. BPF_EXIT_INSN(),
  192. },
  193. .errstr = "back-edge",
  194. .result = REJECT,
  195. },
  196. {
  197. "conditional loop",
  198. .insns = {
  199. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  200. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  201. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  202. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
  203. BPF_EXIT_INSN(),
  204. },
  205. .errstr = "back-edge",
  206. .result = REJECT,
  207. },
  208. {
  209. "read uninitialized register",
  210. .insns = {
  211. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  212. BPF_EXIT_INSN(),
  213. },
  214. .errstr = "R2 !read_ok",
  215. .result = REJECT,
  216. },
  217. {
  218. "read invalid register",
  219. .insns = {
  220. BPF_MOV64_REG(BPF_REG_0, -1),
  221. BPF_EXIT_INSN(),
  222. },
  223. .errstr = "R15 is invalid",
  224. .result = REJECT,
  225. },
  226. {
  227. "program doesn't init R0 before exit",
  228. .insns = {
  229. BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
  230. BPF_EXIT_INSN(),
  231. },
  232. .errstr = "R0 !read_ok",
  233. .result = REJECT,
  234. },
  235. {
  236. "program doesn't init R0 before exit in all branches",
  237. .insns = {
  238. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  239. BPF_MOV64_IMM(BPF_REG_0, 1),
  240. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
  241. BPF_EXIT_INSN(),
  242. },
  243. .errstr = "R0 !read_ok",
  244. .errstr_unpriv = "R1 pointer comparison",
  245. .result = REJECT,
  246. },
  247. {
  248. "stack out of bounds",
  249. .insns = {
  250. BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
  251. BPF_EXIT_INSN(),
  252. },
  253. .errstr = "invalid stack",
  254. .result = REJECT,
  255. },
  256. {
  257. "invalid call insn1",
  258. .insns = {
  259. BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
  260. BPF_EXIT_INSN(),
  261. },
  262. .errstr = "BPF_CALL uses reserved",
  263. .result = REJECT,
  264. },
  265. {
  266. "invalid call insn2",
  267. .insns = {
  268. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
  269. BPF_EXIT_INSN(),
  270. },
  271. .errstr = "BPF_CALL uses reserved",
  272. .result = REJECT,
  273. },
  274. {
  275. "invalid function call",
  276. .insns = {
  277. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
  278. BPF_EXIT_INSN(),
  279. },
  280. .errstr = "invalid func unknown#1234567",
  281. .result = REJECT,
  282. },
  283. {
  284. "uninitialized stack1",
  285. .insns = {
  286. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  287. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  288. BPF_LD_MAP_FD(BPF_REG_1, 0),
  289. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  290. BPF_FUNC_map_lookup_elem),
  291. BPF_EXIT_INSN(),
  292. },
  293. .fixup_map1 = { 2 },
  294. .errstr = "invalid indirect read from stack",
  295. .result = REJECT,
  296. },
  297. {
  298. "uninitialized stack2",
  299. .insns = {
  300. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  301. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
  302. BPF_EXIT_INSN(),
  303. },
  304. .errstr = "invalid read from stack",
  305. .result = REJECT,
  306. },
  307. {
  308. "invalid argument register",
  309. .insns = {
  310. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  311. BPF_FUNC_get_cgroup_classid),
  312. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  313. BPF_FUNC_get_cgroup_classid),
  314. BPF_EXIT_INSN(),
  315. },
  316. .errstr = "R1 !read_ok",
  317. .result = REJECT,
  318. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  319. },
  320. {
  321. "non-invalid argument register",
  322. .insns = {
  323. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  324. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  325. BPF_FUNC_get_cgroup_classid),
  326. BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
  327. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  328. BPF_FUNC_get_cgroup_classid),
  329. BPF_EXIT_INSN(),
  330. },
  331. .result = ACCEPT,
  332. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  333. },
  334. {
  335. "check valid spill/fill",
  336. .insns = {
  337. /* spill R1(ctx) into stack */
  338. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  339. /* fill it back into R2 */
  340. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
  341. /* should be able to access R0 = *(R2 + 8) */
  342. /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
  343. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  344. BPF_EXIT_INSN(),
  345. },
  346. .errstr_unpriv = "R0 leaks addr",
  347. .result = ACCEPT,
  348. .result_unpriv = REJECT,
  349. },
  350. {
  351. "check valid spill/fill, skb mark",
  352. .insns = {
  353. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  354. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
  355. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  356. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  357. offsetof(struct __sk_buff, mark)),
  358. BPF_EXIT_INSN(),
  359. },
  360. .result = ACCEPT,
  361. .result_unpriv = ACCEPT,
  362. },
  363. {
  364. "check corrupted spill/fill",
  365. .insns = {
  366. /* spill R1(ctx) into stack */
  367. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  368. /* mess up with R1 pointer on stack */
  369. BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
  370. /* fill back into R0 should fail */
  371. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  372. BPF_EXIT_INSN(),
  373. },
  374. .errstr_unpriv = "attempt to corrupt spilled",
  375. .errstr = "corrupted spill",
  376. .result = REJECT,
  377. },
  378. {
  379. "invalid src register in STX",
  380. .insns = {
  381. BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
  382. BPF_EXIT_INSN(),
  383. },
  384. .errstr = "R15 is invalid",
  385. .result = REJECT,
  386. },
  387. {
  388. "invalid dst register in STX",
  389. .insns = {
  390. BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
  391. BPF_EXIT_INSN(),
  392. },
  393. .errstr = "R14 is invalid",
  394. .result = REJECT,
  395. },
  396. {
  397. "invalid dst register in ST",
  398. .insns = {
  399. BPF_ST_MEM(BPF_B, 14, -1, -1),
  400. BPF_EXIT_INSN(),
  401. },
  402. .errstr = "R14 is invalid",
  403. .result = REJECT,
  404. },
  405. {
  406. "invalid src register in LDX",
  407. .insns = {
  408. BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
  409. BPF_EXIT_INSN(),
  410. },
  411. .errstr = "R12 is invalid",
  412. .result = REJECT,
  413. },
  414. {
  415. "invalid dst register in LDX",
  416. .insns = {
  417. BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
  418. BPF_EXIT_INSN(),
  419. },
  420. .errstr = "R11 is invalid",
  421. .result = REJECT,
  422. },
  423. {
  424. "junk insn",
  425. .insns = {
  426. BPF_RAW_INSN(0, 0, 0, 0, 0),
  427. BPF_EXIT_INSN(),
  428. },
  429. .errstr = "invalid BPF_LD_IMM",
  430. .result = REJECT,
  431. },
  432. {
  433. "junk insn2",
  434. .insns = {
  435. BPF_RAW_INSN(1, 0, 0, 0, 0),
  436. BPF_EXIT_INSN(),
  437. },
  438. .errstr = "BPF_LDX uses reserved fields",
  439. .result = REJECT,
  440. },
  441. {
  442. "junk insn3",
  443. .insns = {
  444. BPF_RAW_INSN(-1, 0, 0, 0, 0),
  445. BPF_EXIT_INSN(),
  446. },
  447. .errstr = "invalid BPF_ALU opcode f0",
  448. .result = REJECT,
  449. },
  450. {
  451. "junk insn4",
  452. .insns = {
  453. BPF_RAW_INSN(-1, -1, -1, -1, -1),
  454. BPF_EXIT_INSN(),
  455. },
  456. .errstr = "invalid BPF_ALU opcode f0",
  457. .result = REJECT,
  458. },
  459. {
  460. "junk insn5",
  461. .insns = {
  462. BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
  463. BPF_EXIT_INSN(),
  464. },
  465. .errstr = "BPF_ALU uses reserved fields",
  466. .result = REJECT,
  467. },
	/* DW (8-byte) load from fp-4: offset not 8-byte aligned. */
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	/* No .fixup_map1 here, so the BPF_LD_MAP_FD really carries fd 0,
	 * which is not a map fd; the helper call must be rejected.
	 */
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	/* Store through map_lookup_elem()'s return value without first
	 * testing it against NULL: R0 is still map_value_or_null.
	 * fixup_map1 patches the ld_map_fd at insn index 3.
	 */
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	/* NULL check is present, but the DW store lands at map_value + 4,
	 * which is not 8-byte aligned.
	 */
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "misaligned access",
		.result = REJECT,
	},
	/* The JEQ branches so that the R0 == NULL path falls through to a
	 * second store: one path dereferences a possibly-NULL pointer.
	 */
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.result = REJECT,
	},
	/* Branchy stack writes: verifier must track stack state across many
	 * conditional forward jumps.  Privileged: ACCEPT.  Unprivileged:
	 * comparing ctx pointer R1 against immediates is rejected.
	 */
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	/* Switch-like chain: each case stores to a distinct stack slot then
	 * jumps (BPF_JA) over the remaining cases to the common epilogue.
	 */
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	/* Like jump test 2 but each case also rewrites R2 before jumping to
	 * a shared map_delete_elem() call, so the verifier must verify the
	 * final call once per distinct R2 state.  fixup_map1 patches the
	 * ld_map_fd at insn index 24.
	 */
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 24 },
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	/* Dense mesh of overlapping forward jumps to stress branch-state
	 * exploration.  NOTE(review): BPF_JMP_IMM's third argument is the
	 * 32-bit immediate, so BPF_REG_10 here is used as the literal value
	 * 10, not a register operand — this matches the upstream test but
	 * is worth double-checking if it's ever edited.
	 */
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	/* Repeated diamond-shaped branches that converge on the same stack
	 * slot: exercises pruning/state comparison across join points.
	 */
	{
		"jump test 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	/* Reading the whitelisted read-only __sk_buff fields is allowed for
	 * a socket-filter program (default prog type in this suite).
	 */
	{
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	/* Negative offset into ctx is never a valid __sk_buff field. */
	{
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* R1 may hold either ctx or a map value at the ctx-field load,
	 * depending on the branch taken: "different pointers" must reject.
	 */
	{
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	/* Same mixed-pointer problem as bad2, reached via a backward jump
	 * into the ctx-field load instead of falling through.
	 */
	{
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
		},
		.fixup_map1 = { 6 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	/* Variant of bad3 where the ctx load targets skb->len and R1 is
	 * overwritten by the load itself before looping back.
	 */
	{
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
		},
		.fixup_map1 = { 7 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	/* skb->mark is read-only for socket-filter programs. */
	{
		"check skb->mark is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	/* skb->tc_index is likewise not writable from a socket filter. */
	{
		"check skb->tc_index is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	/* Every single byte of skb->cb[0..4] is both writable and readable
	 * with BPF_B accesses.
	 */
	{
		"check cb access: byte",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	/* Byte store one past the end of cb[]. */
	{
		"check cb access: byte, oob 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* Byte store one before the start of cb[]. */
	{
		"check cb access: byte, oob 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) - 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* Byte load one past the end of cb[]. */
	{
		"check cb access: byte, oob 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* Byte load one before the start of cb[]. */
	{
		"check cb access: byte, oob 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) - 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* cb[] is not part of the CGROUP_SOCK context, so even an in-range
	 * access must be rejected for that program type.
	 */
	{
		"check cb access: byte, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	/* Every 2-byte-aligned half-word of skb->cb[0..4] is writable and
	 * readable with BPF_H accesses.
	 */
	{
		"check cb access: half",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	/* Half-word store at odd offset inside cb[0]. */
	{
		"check cb access: half, unaligned",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	/* Half-word store past the end of cb[]. */
	{
		"check cb access: half, oob 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* Half-word store before the start of cb[]. */
	{
		"check cb access: half, oob 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) - 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* Half-word load past the end of cb[]. */
	{
		"check cb access: half, oob 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* Half-word load before the start of cb[]. */
	{
		"check cb access: half, oob 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) - 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* cb[] access is invalid for CGROUP_SOCK programs. */
	{
		"check cb access: half, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	/* Word-sized access to each cb[] element at its natural offset. */
	{
		"check cb access: word",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	/* Word store at cb[0] + 2: 4-byte access, 2-byte aligned. */
	{
		"check cb access: word, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	/* Word store at cb[4] + 1. */
	{
		"check cb access: word, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	/* Word store at cb[4] + 2. */
	{
		"check cb access: word, unaligned 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	/* Word store at cb[4] + 3. */
	{
		"check cb access: word, unaligned 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	/* DW access spanning cb[0..1] and cb[2..3] — both 8-byte aligned
	 * and fully inside cb[].
	 */
	{
		"check cb access: double",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	/* DW store at cb[1]: only 4-byte aligned. */
	{
		"check cb access: double, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	/* DW store at cb[3]: only 4-byte aligned. */
	{
		"check cb access: double, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	/* DW store at cb[4] would spill 4 bytes past the end of cb[]. */
	{
		"check cb access: double, oob 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* DW store entirely past the end of cb[]. */
	{
		"check cb access: double, oob 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* DW store entirely before the start of cb[]. */
	{
		"check cb access: double, oob 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) - 8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* DW load at cb[4]: spills past the end of cb[]. */
	{
		"check cb access: double, oob 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* DW load entirely past the end of cb[]. */
	{
		"check cb access: double, oob 5",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* DW load entirely before the start of cb[]. */
	{
		"check cb access: double, oob 6",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) - 8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	/* cb[] access is invalid for CGROUP_SOCK programs. */
	{
		"check cb access: double, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	/* Far out-of-range cb[] offset for a SCHED_ACT program.
	 * errstr_unpriv is "" because unprivileged loading of this prog
	 * type fails before any specific message is produced.
	 */
	{
		"check out of range skb->cb access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 256),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
	},
	/* Socket filters may read mark/tc_index and write cb[]; privileged
	 * ACCEPT.  Unprivileged: storing the ctx pointer R1 into cb[]
	 * would leak a kernel address.
	 */
	{
		"write skb fields from socket prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.errstr_unpriv = "R1 leaks addr",
		.result_unpriv = REJECT,
	},
	/* SCHED_CLS programs may additionally write mark and tc_index.
	 * Unprivileged users cannot load this prog type at all.
	 */
	{
		"write skb fields from tc_cls_act prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	/* fp-10 plus insn offset 2 == fp-8: DW access is 8-byte aligned
	 * overall even though neither component is, so it must pass.
	 */
	{
		"PTR_TO_STACK store/load",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	/* fp-8 + 2 == fp-6: the insn offset breaks DW alignment. */
	{
		"PTR_TO_STACK store/load - bad alignment on off",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -6 size 8",
	},
	/* fp-10 + 8 == fp-2: the register offset breaks DW alignment. */
	{
		"PTR_TO_STACK store/load - bad alignment on reg",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -2 size 8",
	},
	/* fp-80000 + 8 is far below the valid stack range. */
	{
		"PTR_TO_STACK store/load - out of bounds low",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=-79992 size=8",
	},
	/* fp-8 + 8 == fp-0: at/above the frame pointer is out of bounds. */
	{
		"PTR_TO_STACK store/load - out of bounds high",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=0 size=8",
	},
  1412. {
  1413. "unpriv: return pointer",
  1414. .insns = {
  1415. BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
  1416. BPF_EXIT_INSN(),
  1417. },
  1418. .result = ACCEPT,
  1419. .result_unpriv = REJECT,
  1420. .errstr_unpriv = "R0 leaks addr",
  1421. },
  1422. {
  1423. "unpriv: add const to pointer",
  1424. .insns = {
  1425. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  1426. BPF_MOV64_IMM(BPF_REG_0, 0),
  1427. BPF_EXIT_INSN(),
  1428. },
  1429. .result = ACCEPT,
  1430. .result_unpriv = REJECT,
  1431. .errstr_unpriv = "R1 pointer arithmetic",
  1432. },
  1433. {
  1434. "unpriv: add pointer to pointer",
  1435. .insns = {
  1436. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
  1437. BPF_MOV64_IMM(BPF_REG_0, 0),
  1438. BPF_EXIT_INSN(),
  1439. },
  1440. .result = ACCEPT,
  1441. .result_unpriv = REJECT,
  1442. .errstr_unpriv = "R1 pointer arithmetic",
  1443. },
  1444. {
  1445. "unpriv: neg pointer",
  1446. .insns = {
  1447. BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
  1448. BPF_MOV64_IMM(BPF_REG_0, 0),
  1449. BPF_EXIT_INSN(),
  1450. },
  1451. .result = ACCEPT,
  1452. .result_unpriv = REJECT,
  1453. .errstr_unpriv = "R1 pointer arithmetic",
  1454. },
  1455. {
  1456. "unpriv: cmp pointer with const",
  1457. .insns = {
  1458. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
  1459. BPF_MOV64_IMM(BPF_REG_0, 0),
  1460. BPF_EXIT_INSN(),
  1461. },
  1462. .result = ACCEPT,
  1463. .result_unpriv = REJECT,
  1464. .errstr_unpriv = "R1 pointer comparison",
  1465. },
  1466. {
  1467. "unpriv: cmp pointer with pointer",
  1468. .insns = {
  1469. BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  1470. BPF_MOV64_IMM(BPF_REG_0, 0),
  1471. BPF_EXIT_INSN(),
  1472. },
  1473. .result = ACCEPT,
  1474. .result_unpriv = REJECT,
  1475. .errstr_unpriv = "R10 pointer comparison",
  1476. },
  1477. {
  1478. "unpriv: check that printk is disallowed",
  1479. .insns = {
  1480. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1481. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  1482. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  1483. BPF_MOV64_IMM(BPF_REG_2, 8),
  1484. BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
  1485. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1486. BPF_FUNC_trace_printk),
  1487. BPF_MOV64_IMM(BPF_REG_0, 0),
  1488. BPF_EXIT_INSN(),
  1489. },
  1490. .errstr_unpriv = "unknown func bpf_trace_printk#6",
  1491. .result_unpriv = REJECT,
  1492. .result = ACCEPT,
  1493. },
  1494. {
  1495. "unpriv: pass pointer to helper function",
  1496. .insns = {
  1497. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1498. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1499. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1500. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1501. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  1502. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  1503. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1504. BPF_FUNC_map_update_elem),
  1505. BPF_MOV64_IMM(BPF_REG_0, 0),
  1506. BPF_EXIT_INSN(),
  1507. },
  1508. .fixup_map1 = { 3 },
  1509. .errstr_unpriv = "R4 leaks addr",
  1510. .result_unpriv = REJECT,
  1511. .result = ACCEPT,
  1512. },
  1513. {
  1514. "unpriv: indirectly pass pointer on stack to helper function",
  1515. .insns = {
  1516. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1517. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1518. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1519. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1520. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1521. BPF_FUNC_map_lookup_elem),
  1522. BPF_MOV64_IMM(BPF_REG_0, 0),
  1523. BPF_EXIT_INSN(),
  1524. },
  1525. .fixup_map1 = { 3 },
  1526. .errstr = "invalid indirect read from stack off -8+0 size 8",
  1527. .result = REJECT,
  1528. },
  1529. {
  1530. "unpriv: mangle pointer on stack 1",
  1531. .insns = {
  1532. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1533. BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
  1534. BPF_MOV64_IMM(BPF_REG_0, 0),
  1535. BPF_EXIT_INSN(),
  1536. },
  1537. .errstr_unpriv = "attempt to corrupt spilled",
  1538. .result_unpriv = REJECT,
  1539. .result = ACCEPT,
  1540. },
  1541. {
  1542. "unpriv: mangle pointer on stack 2",
  1543. .insns = {
  1544. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1545. BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
  1546. BPF_MOV64_IMM(BPF_REG_0, 0),
  1547. BPF_EXIT_INSN(),
  1548. },
  1549. .errstr_unpriv = "attempt to corrupt spilled",
  1550. .result_unpriv = REJECT,
  1551. .result = ACCEPT,
  1552. },
  1553. {
  1554. "unpriv: read pointer from stack in small chunks",
  1555. .insns = {
  1556. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  1557. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
  1558. BPF_MOV64_IMM(BPF_REG_0, 0),
  1559. BPF_EXIT_INSN(),
  1560. },
  1561. .errstr = "invalid size",
  1562. .result = REJECT,
  1563. },
  1564. {
  1565. "unpriv: write pointer into ctx",
  1566. .insns = {
  1567. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
  1568. BPF_MOV64_IMM(BPF_REG_0, 0),
  1569. BPF_EXIT_INSN(),
  1570. },
  1571. .errstr_unpriv = "R1 leaks addr",
  1572. .result_unpriv = REJECT,
  1573. .errstr = "invalid bpf_context access",
  1574. .result = REJECT,
  1575. },
  1576. {
  1577. "unpriv: spill/fill of ctx",
  1578. .insns = {
  1579. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1580. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1581. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1582. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1583. BPF_MOV64_IMM(BPF_REG_0, 0),
  1584. BPF_EXIT_INSN(),
  1585. },
  1586. .result = ACCEPT,
  1587. },
  1588. {
  1589. "unpriv: spill/fill of ctx 2",
  1590. .insns = {
  1591. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1592. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1593. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1594. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1595. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1596. BPF_FUNC_get_hash_recalc),
  1597. BPF_EXIT_INSN(),
  1598. },
  1599. .result = ACCEPT,
  1600. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1601. },
  1602. {
  1603. "unpriv: spill/fill of ctx 3",
  1604. .insns = {
  1605. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1606. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1607. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1608. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
  1609. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1610. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1611. BPF_FUNC_get_hash_recalc),
  1612. BPF_EXIT_INSN(),
  1613. },
  1614. .result = REJECT,
  1615. .errstr = "R1 type=fp expected=ctx",
  1616. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1617. },
  1618. {
  1619. "unpriv: spill/fill of ctx 4",
  1620. .insns = {
  1621. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1622. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1623. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1624. BPF_MOV64_IMM(BPF_REG_0, 1),
  1625. BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
  1626. BPF_REG_0, -8, 0),
  1627. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1628. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1629. BPF_FUNC_get_hash_recalc),
  1630. BPF_EXIT_INSN(),
  1631. },
  1632. .result = REJECT,
  1633. .errstr = "R1 type=inv expected=ctx",
  1634. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1635. },
  1636. {
  1637. "unpriv: spill/fill of different pointers stx",
  1638. .insns = {
  1639. BPF_MOV64_IMM(BPF_REG_3, 42),
  1640. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1641. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1642. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  1643. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1644. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
  1645. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
  1646. BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
  1647. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1648. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1649. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
  1650. offsetof(struct __sk_buff, mark)),
  1651. BPF_MOV64_IMM(BPF_REG_0, 0),
  1652. BPF_EXIT_INSN(),
  1653. },
  1654. .result = REJECT,
  1655. .errstr = "same insn cannot be used with different pointers",
  1656. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  1657. },
  1658. {
  1659. "unpriv: spill/fill of different pointers ldx",
  1660. .insns = {
  1661. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1662. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1663. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  1664. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1665. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
  1666. -(__s32)offsetof(struct bpf_perf_event_data,
  1667. sample_period) - 8),
  1668. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
  1669. BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
  1670. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  1671. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  1672. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
  1673. offsetof(struct bpf_perf_event_data,
  1674. sample_period)),
  1675. BPF_MOV64_IMM(BPF_REG_0, 0),
  1676. BPF_EXIT_INSN(),
  1677. },
  1678. .result = REJECT,
  1679. .errstr = "same insn cannot be used with different pointers",
  1680. .prog_type = BPF_PROG_TYPE_PERF_EVENT,
  1681. },
  1682. {
  1683. "unpriv: write pointer into map elem value",
  1684. .insns = {
  1685. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1686. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1687. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1688. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1689. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1690. BPF_FUNC_map_lookup_elem),
  1691. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  1692. BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
  1693. BPF_EXIT_INSN(),
  1694. },
  1695. .fixup_map1 = { 3 },
  1696. .errstr_unpriv = "R0 leaks addr",
  1697. .result_unpriv = REJECT,
  1698. .result = ACCEPT,
  1699. },
  1700. {
  1701. "unpriv: partial copy of pointer",
  1702. .insns = {
  1703. BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
  1704. BPF_MOV64_IMM(BPF_REG_0, 0),
  1705. BPF_EXIT_INSN(),
  1706. },
  1707. .errstr_unpriv = "R10 partial copy",
  1708. .result_unpriv = REJECT,
  1709. .result = ACCEPT,
  1710. },
  1711. {
  1712. "unpriv: pass pointer to tail_call",
  1713. .insns = {
  1714. BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
  1715. BPF_LD_MAP_FD(BPF_REG_2, 0),
  1716. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1717. BPF_FUNC_tail_call),
  1718. BPF_MOV64_IMM(BPF_REG_0, 0),
  1719. BPF_EXIT_INSN(),
  1720. },
  1721. .fixup_prog = { 1 },
  1722. .errstr_unpriv = "R3 leaks addr into helper",
  1723. .result_unpriv = REJECT,
  1724. .result = ACCEPT,
  1725. },
  1726. {
  1727. "unpriv: cmp map pointer with zero",
  1728. .insns = {
  1729. BPF_MOV64_IMM(BPF_REG_1, 0),
  1730. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1731. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
  1732. BPF_MOV64_IMM(BPF_REG_0, 0),
  1733. BPF_EXIT_INSN(),
  1734. },
  1735. .fixup_map1 = { 1 },
  1736. .errstr_unpriv = "R1 pointer comparison",
  1737. .result_unpriv = REJECT,
  1738. .result = ACCEPT,
  1739. },
  1740. {
  1741. "unpriv: write into frame pointer",
  1742. .insns = {
  1743. BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
  1744. BPF_MOV64_IMM(BPF_REG_0, 0),
  1745. BPF_EXIT_INSN(),
  1746. },
  1747. .errstr = "frame pointer is read only",
  1748. .result = REJECT,
  1749. },
  1750. {
  1751. "unpriv: spill/fill frame pointer",
  1752. .insns = {
  1753. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  1754. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  1755. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
  1756. BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
  1757. BPF_MOV64_IMM(BPF_REG_0, 0),
  1758. BPF_EXIT_INSN(),
  1759. },
  1760. .errstr = "frame pointer is read only",
  1761. .result = REJECT,
  1762. },
  1763. {
  1764. "unpriv: cmp of frame pointer",
  1765. .insns = {
  1766. BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
  1767. BPF_MOV64_IMM(BPF_REG_0, 0),
  1768. BPF_EXIT_INSN(),
  1769. },
  1770. .errstr_unpriv = "R10 pointer comparison",
  1771. .result_unpriv = REJECT,
  1772. .result = ACCEPT,
  1773. },
  1774. {
  1775. "unpriv: cmp of stack pointer",
  1776. .insns = {
  1777. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1778. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1779. BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
  1780. BPF_MOV64_IMM(BPF_REG_0, 0),
  1781. BPF_EXIT_INSN(),
  1782. },
  1783. .errstr_unpriv = "R2 pointer comparison",
  1784. .result_unpriv = REJECT,
  1785. .result = ACCEPT,
  1786. },
  1787. {
  1788. "unpriv: obfuscate stack pointer",
  1789. .insns = {
  1790. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1791. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1792. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1793. BPF_MOV64_IMM(BPF_REG_0, 0),
  1794. BPF_EXIT_INSN(),
  1795. },
  1796. .errstr_unpriv = "R2 pointer arithmetic",
  1797. .result_unpriv = REJECT,
  1798. .result = ACCEPT,
  1799. },
	/* raw_stack tests: bpf_skb_load_bytes() takes a raw
	 * (possibly uninitialized) stack buffer (R3) and a length (R4).
	 * Verify the helper's argument bounds checking, that a successful
	 * call initializes the buffer, and that it scrubs spill slots it
	 * overlaps while leaving adjacent spilled registers intact.
	 */
	{
		/* buffer never written: reading it back must fail */
		"raw_stack: no skb_load_bytes",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			/* Call to skb_load_bytes() omitted. */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid read from stack off -8+0 size 8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, negative len",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, -8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, negative len 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, ~0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, zero len",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* helper writes the buffer, so no explicit init is needed */
		"raw_stack: skb_load_bytes, no init",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* pre-initialized buffer is equally fine */
		"raw_stack: skb_load_bytes, init",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* spilled ctx pointers just outside the written window
		 * survive and can still be used as skb pointers
		 */
		"raw_stack: skb_load_bytes, spilled regs around bounds",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* the helper overwrote the spill slot: the fill is now a
		 * plain scalar, unusable as an skb pointer
		 */
		"raw_stack: skb_load_bytes, spilled regs corruption",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R0 invalid mem access 'inv'",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* only the middle slot (inside the written window) is
		 * scrubbed; the outer two remain valid skb pointers
		 */
		"raw_stack: skb_load_bytes, spilled regs corruption 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R3 invalid mem access 'inv'",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* same layout, but the scrubbed middle slot is only used
		 * as scalar data, which is fine
		 */
		"raw_stack: skb_load_bytes, spilled regs + data",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
				    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* buffer start below the 512-byte stack limit */
		"raw_stack: skb_load_bytes, invalid access 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-513 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* fp-1 + 8 bytes runs past the top of the stack */
		"raw_stack: skb_load_bytes, invalid access 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* 0xffffffff sign-extends to -1 for both off and size */
		"raw_stack: skb_load_bytes, invalid access 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=-1",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 5",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 6",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=0",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* a full 512-byte buffer is the largest legal access */
		"raw_stack: skb_load_bytes, large access",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 512),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	/* direct packet access tests: loads/stores through skb->data are
	 * only allowed after the program proves, with a comparison against
	 * skb->data_end, that the access is in bounds. The tests check
	 * both branch directions and pointer derivations (add, shift, and).
	 */
	{
		/* canonical pattern: if (data + 8 > data_end) skip access */
		"direct packet access: test1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* packet pointer re-derived with a variable (masked) offset
		 * still stays within the verified 14-byte window
		 */
		"direct packet access: test2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
			/* shift pair masks R2 down to its low 16 bits */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* socket filters may not read skb->data at all */
		"direct packet access: test3",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access off=76",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	},
	{
		/* bounds-checked packet writes are allowed for cls */
		"direct packet access: test4 (write)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* access only on the taken branch of data_end >= reg */
		"direct packet access: test5 (pkt_end >= reg, good access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* access sits on the fall-through (unproven) path */
		"direct packet access: test6 (pkt_end >= reg, bad access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid access to packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* one access on each branch: the unproven one must fail */
		"direct packet access: test7 (pkt_end >= reg, both accesses)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid access to packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* two opposite comparisons: both paths end up proven */
		"direct packet access: test8 (double test, variant 1)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* as test8 but with the second check on the taken path */
		"direct packet access: test9 (double test, variant 2)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* the write lands on the branch where bounds are NOT proven */
		"direct packet access: test10 (write invalid)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid access to packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* variable offset bounded by right shift: (144+23)>>3 */
		"direct packet access: test11 (shift, good access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 144),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* variable offset bounded by AND mask: (144+23)&15 */
		"direct packet access: test12 (and, good access)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 144),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
  2368. {
  2369. "direct packet access: test13 (branches, good access)",
  2370. .insns = {
  2371. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2372. offsetof(struct __sk_buff, data)),
  2373. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2374. offsetof(struct __sk_buff, data_end)),
  2375. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2376. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
  2377. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
  2378. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2379. offsetof(struct __sk_buff, mark)),
  2380. BPF_MOV64_IMM(BPF_REG_4, 1),
  2381. BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
  2382. BPF_MOV64_IMM(BPF_REG_3, 14),
  2383. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  2384. BPF_MOV64_IMM(BPF_REG_3, 24),
  2385. BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
  2386. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
  2387. BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
  2388. BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
  2389. BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
  2390. BPF_MOV64_IMM(BPF_REG_0, 1),
  2391. BPF_EXIT_INSN(),
  2392. BPF_MOV64_IMM(BPF_REG_0, 0),
  2393. BPF_EXIT_INSN(),
  2394. },
  2395. .result = ACCEPT,
  2396. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2397. },
  2398. {
  2399. "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
  2400. .insns = {
  2401. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2402. offsetof(struct __sk_buff, data)),
  2403. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2404. offsetof(struct __sk_buff, data_end)),
  2405. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2406. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
  2407. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
  2408. BPF_MOV64_IMM(BPF_REG_5, 12),
  2409. BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
  2410. BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
  2411. BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
  2412. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
  2413. BPF_MOV64_IMM(BPF_REG_0, 1),
  2414. BPF_EXIT_INSN(),
  2415. BPF_MOV64_IMM(BPF_REG_0, 0),
  2416. BPF_EXIT_INSN(),
  2417. },
  2418. .result = ACCEPT,
  2419. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2420. },
<test></test>
	{
		/* XDP: packet pointer with a verified 8-byte range may be
		 * passed to map_update_elem as the value argument.
		 */
		"helper access to packet: test1, valid packet_ptr range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_update_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 5 },
		.result_unpriv = ACCEPT,
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* XDP: packet pointer passed to a helper without any
		 * data_end bounds check must be rejected.
		 */
		"helper access to packet: test2, unchecked packet_ptr",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 1 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* XDP: pkt_ptr advanced by a value read from the packet is
		 * fine as long as the result is re-checked against data_end
		 * before the helper call.
		 */
		"helper access to packet: test3, variable add",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 11 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* XDP: only 4 bytes were range-checked but the map key
		 * needs more; helper access must be rejected.
		 */
		"helper access to packet: test4, packet_ptr with bad range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 7 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* XDP: pointer moved 1 byte in before checking only 7 more
		 * bytes — the verified range is too short for the helper.
		 */
		"helper access to packet: test5, packet_ptr with too short range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 6 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* SCHED_CLS counterpart of XDP test1: checked 8-byte pkt
		 * range is a valid map_update_elem value argument.
		 */
		"helper access to packet: test6, cls valid packet_ptr range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_update_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 5 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* SCHED_CLS counterpart of XDP test2: unchecked pkt pointer
		 * to a helper is rejected.
		 */
		"helper access to packet: test7, cls unchecked packet_ptr",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 1 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* SCHED_CLS counterpart of XDP test3: variable pkt offset
		 * re-checked against data_end before the call — accepted.
		 */
		"helper access to packet: test8, cls variable add",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 11 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* SCHED_CLS counterpart of XDP test4: only 4 bytes checked,
		 * not enough for the helper — rejected.
		 */
		"helper access to packet: test9, cls packet_ptr with bad range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 7 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* SCHED_CLS counterpart of XDP test5: pointer advanced by 1
		 * leaves the checked range one byte short — rejected.
		 */
		"helper access to packet: test10, cls packet_ptr with too short range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 6 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* skb_store_bytes does not take a direct packet pointer
		 * argument, so passing one must be rejected even after a
		 * bounds check.
		 */
		"helper access to packet: test11, cls unsuitable helper 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_4, 42),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_store_bytes),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "helper access to the packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Same idea with skb_load_bytes: packet pointer is not a
		 * suitable argument — rejected.
		 */
		"helper access to packet: test12, cls unsuitable helper 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_4, 4),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "helper access to the packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* csum_diff may read packet memory: checked 8-byte region,
		 * 4-byte size argument — accepted.
		 */
		"helper access to packet: test13, cls helper ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Subtracting from the checked boundary pointer turns it
		 * into a scalar (inv); passing it where a pointer is
		 * expected is rejected.
		 */
		"helper access to packet: test14, cls helper fail sub",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "type=inv expected=fp",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Size 8 exceeds the 8-byte region starting at data+1
		 * relative to what was verified — rejected.
		 */
		"helper access to packet: test15, cls helper fail range 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_2, 8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Negative size argument (-9) for the packet read —
		 * rejected.
		 */
		"helper access to packet: test16, cls helper fail range 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_2, -9),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* ~0 as the size argument (all-ones / -1) — rejected. */
		"helper access to packet: test17, cls helper fail range 3",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_2, ~0),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Zero-length packet access through the helper — rejected
		 * at the time this test was written.
		 */
		"helper access to packet: test18, cls helper fail range zero",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Passing pkt_end (R7) itself as the helper's pointer
		 * argument — wrong register type, rejected.
		 */
		"helper access to packet: test19, pkt end as input",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R1 type=pkt_end expected=fp",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* R1 is left holding the boundary pointer (data+8), not the
		 * start of the checked region — range is wrong, rejected.
		 */
		"helper access to packet: test20, wrong reg",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* In-bounds constant-offset store into a map value;
		 * unprivileged still fails on the pointer leak check.
		 */
		"valid map access into an array with a constant",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 leaks addr",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Known-constant register (4 << 2 = 16) added to the map
		 * value pointer stays in bounds — accepted for privileged.
		 */
		"valid map access into an array with a register",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_IMM(BPF_REG_1, 4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Index read from the map value, upper-bounded by a
		 * JGE MAX_ENTRIES check before indexing — accepted.
		 */
		"valid map access into an array with a variable",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Signed index bounded on both sides with JSGT checks
		 * (floor and ceiling) before shifting/indexing — accepted.
		 */
		"valid map access into an array with a signed variable",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		/* Constant offset one element past the end of the value
		 * ((MAX_ENTRIES + 1) << 2 == value_size) — rejected.
		 */
		"invalid map access into an array with a constant",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
				   offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
		.result = REJECT,
	},
	{
		/* Known register value MAX_ENTRIES + 1 pushes the pointer
		 * past the array — rejected on min-value bound.
		 */
		"invalid map access into an array with a register",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is outside of the array range",
		.result_unpriv = REJECT,
		.result = REJECT,
	},
	{
		/* Index read from the map value used with no bounds check
		 * at all — rejected.
		 */
		"invalid map access into an array with a variable",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
		.result_unpriv = REJECT,
		.result = REJECT,
	},
	{
		/* Only the signed upper bound is checked (JSGT); the index
		 * may still be negative — rejected.
		 */
		"invalid map access into an array with no floor check",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
		.result_unpriv = REJECT,
		.result = REJECT,
	},
	{
		/* Unsigned JGT against MAX_ENTRIES + 1 lets index ==
		 * MAX_ENTRIES through, so off=44 size=8 overruns the
		 * 48-byte value — rejected.
		 */
		"invalid map access into an array with a invalid max check",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
		.result_unpriv = REJECT,
		.result = REJECT,
	},
	{
		/* Adding one map-value pointer to another produces an
		 * unbounded value — rejected.
		 */
		"invalid map access into an array with a invalid max check",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3, 11 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
		.result_unpriv = REJECT,
		.result = REJECT,
	},
	{
		/* A copy of the lookup result (R4) inherits the NULL check
		 * performed on R0, so the store through R4 is valid.
		 */
		"multiple registers share map_lookup_elem result",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
	{
		/* R4 holds the FIRST lookup's result; the NULL check on R0
		 * after the SECOND call says nothing about R4, so the store
		 * must be rejected.
		 */
		"invalid memory access with multiple map_lookup_elem calls",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.result = REJECT,
		.errstr = "R4 !read_ok",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
	{
		/* Second lookup sits on a branch (JNE on a constant); on
		 * both paths R4 copies the most recent R0 and the NULL
		 * check precedes the store — accepted.
		 */
		"valid indirect map_lookup_elem access with 2nd lookup in branch",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_2, 10),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
  3192. {
  3193. "multiple registers share map_lookup_elem bad reg type",
  3194. .insns = {
  3195. BPF_MOV64_IMM(BPF_REG_1, 10),
  3196. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  3197. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  3198. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  3199. BPF_LD_MAP_FD(BPF_REG_1, 0),
  3200. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3201. BPF_FUNC_map_lookup_elem),
  3202. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  3203. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  3204. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  3205. BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
  3206. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  3207. BPF_MOV64_IMM(BPF_REG_1, 1),
  3208. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  3209. BPF_MOV64_IMM(BPF_REG_1, 2),
  3210. BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
  3211. BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
  3212. BPF_MOV64_IMM(BPF_REG_1, 3),
  3213. BPF_EXIT_INSN(),
  3214. },
  3215. .fixup_map1 = { 4 },
  3216. .result = REJECT,
  3217. .errstr = "R3 invalid mem access 'inv'",
  3218. .prog_type = BPF_PROG_TYPE_SCHED_CLS
  3219. },
  3220. {
  3221. "invalid map access from else condition",
  3222. .insns = {
  3223. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  3224. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  3225. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  3226. BPF_LD_MAP_FD(BPF_REG_1, 0),
  3227. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  3228. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  3229. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  3230. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
  3231. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
  3232. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  3233. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  3234. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
  3235. BPF_EXIT_INSN(),
  3236. },
  3237. .fixup_map2 = { 3 },
  3238. .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
  3239. .result = REJECT,
  3240. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  3241. .result_unpriv = REJECT,
  3242. },
	{
		/* 34 | 13 == 47: OR of two known constants stays a known
		 * constant, and 47 bytes fit in the 48-byte stack buffer
		 * passed to probe_read.
		 */
		"constant register |= constant should keep constant type",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
			BPF_MOV64_IMM(BPF_REG_2, 34),
			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* 34 | 24 == 58 > 48: the constant-folded size must still be
		 * checked against the stack buffer bounds.
		 */
		"constant register |= constant should not bypass stack boundary checks",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
			BPF_MOV64_IMM(BPF_REG_2, 34),
			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack type R1 off=-48 access_size=58",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Same as above but OR-ing two constant registers: 34 | 13
		 * == 47 <= 48, so the call is accepted.
		 */
		"constant register |= constant register should keep constant type",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
			BPF_MOV64_IMM(BPF_REG_2, 34),
			BPF_MOV64_IMM(BPF_REG_4, 13),
			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Register-register OR: 34 | 24 == 58 > 48 overruns the
		 * stack buffer and must be rejected.
		 */
		"constant register |= constant register should not bypass stack boundary checks",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
			BPF_MOV64_IMM(BPF_REG_2, 34),
			BPF_MOV64_IMM(BPF_REG_4, 24),
			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack type R1 off=-48 access_size=58",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Packet data is read-only for LWT_IN programs: the bounds
		 * check is fine but the byte store must be rejected.
		 */
		"invalid direct packet write for LWT_IN",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "cannot write into packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_LWT_IN,
	},
	{
		/* Same program, LWT_OUT: packet writes are rejected too. */
		"invalid direct packet write for LWT_OUT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "cannot write into packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_LWT_OUT,
	},
	{
		/* Same program, LWT_XMIT: a bounds-checked packet write is
		 * permitted for the xmit hook.
		 */
		"direct packet write for LWT_XMIT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
	},
	{
		/* Bounds-checked packet reads are allowed for LWT_IN. */
		"direct packet read for LWT_IN",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_LWT_IN,
	},
	{
		/* Bounds-checked packet reads are allowed for LWT_OUT. */
		"direct packet read for LWT_OUT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_LWT_OUT,
	},
	{
		/* Bounds-checked packet reads are allowed for LWT_XMIT. */
		"direct packet read for LWT_XMIT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
	},
	{
		/* tc_classid is not part of the LWT context: reading it must
		 * fail context validation.  No .prog_type set here -
		 * presumably the default (unspec/socket) also rejects it;
		 * the errstr is what is asserted.
		 */
		"invalid access of tc_classid for LWT_IN",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_classid)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid bpf_context access",
	},
	{
		/* As above, for the LWT_OUT variant of the test name. */
		"invalid access of tc_classid for LWT_OUT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_classid)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid bpf_context access",
	},
	{
		/* As above, for the LWT_XMIT variant of the test name. */
		"invalid access of tc_classid for LWT_XMIT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_classid)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid bpf_context access",
	},
	{
		/* probe_read into the whole map value (48 bytes per the
		 * errstrs below): exactly value_size is allowed.
		 */
		"helper access to map: full range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* A size (8) smaller than value_size is also fine. */
		"helper access to map: partial range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_2, 8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Size 0 on a map value pointer is rejected. */
		"helper access to map: empty range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* value_size + 8 == 56 overruns the 48-byte value. */
		"helper access to map: out-of-bound range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* A negative size constant is rejected outright. */
		"helper access to map: negative range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_2, -8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=0 size=-8",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* R1 is advanced by a constant immediate to &value->foo
		 * (off=4 per the errstrs below); size covers exactly the
		 * remainder of the value, so this is accepted.
		 */
		"helper access to adjusted map (via const imm): full range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
				      offsetof(struct test_val, foo)),
			BPF_MOV64_IMM(BPF_REG_2,
				      sizeof(struct test_val) -
				      offsetof(struct test_val, foo)),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Smaller size (8) within the adjusted remainder: accepted. */
		"helper access to adjusted map (via const imm): partial range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
				      offsetof(struct test_val, foo)),
			BPF_MOV64_IMM(BPF_REG_2, 8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Zero size on the adjusted pointer: rejected. */
		"helper access to adjusted map (via const imm): empty range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
				      offsetof(struct test_val, foo)),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "R1 min value is outside of the array range",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Remainder + 8 == 52 runs past the end of the value. */
		"helper access to adjusted map (via const imm): out-of-bound range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
				      offsetof(struct test_val, foo)),
			BPF_MOV64_IMM(BPF_REG_2,
				      sizeof(struct test_val) -
				      offsetof(struct test_val, foo) + 8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Negative size with |size| > the 4-byte adjustment. */
		"helper access to adjusted map (via const imm): negative range (> adjustment)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
				      offsetof(struct test_val, foo)),
			BPF_MOV64_IMM(BPF_REG_2, -8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=4 size=-8",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Negative size with |size| < the adjustment: rejected with
		 * a different (range) error.
		 */
		"helper access to adjusted map (via const imm): negative range (< adjustment)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
				      offsetof(struct test_val, foo)),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "R1 min value is outside of the array range",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Same as the "const imm" series, but the offset is added
		 * through a register holding a known constant; full
		 * remaining range is accepted.
		 */
		"helper access to adjusted map (via const reg): full range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_3,
				      offsetof(struct test_val, foo)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2,
				      sizeof(struct test_val) -
				      offsetof(struct test_val, foo)),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Constant-register offset, size 8 within bounds: accepted. */
		"helper access to adjusted map (via const reg): partial range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_3,
				      offsetof(struct test_val, foo)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2, 8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Zero offset register and zero size: rejected. */
		"helper access to adjusted map (via const reg): empty range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "R1 min value is outside of the array range",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Remainder + 8 == 52 past the end of the value: rejected. */
		"helper access to adjusted map (via const reg): out-of-bound range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_3,
				      offsetof(struct test_val, foo)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2,
				      sizeof(struct test_val) -
				      offsetof(struct test_val, foo) + 8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Negative size, |size| > adjustment: rejected. */
		"helper access to adjusted map (via const reg): negative range (> adjustment)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_3,
				      offsetof(struct test_val, foo)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2, -8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=4 size=-8",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Negative size, |size| < adjustment: rejected with the
		 * range error instead.
		 */
		"helper access to adjusted map (via const reg): negative range (< adjustment)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_3,
				      offsetof(struct test_val, foo)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "R1 min value is outside of the array range",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Offset comes from map data, bounded above by a JGT against
		 * offsetof(foo); size equals the remainder past foo, so the
		 * worst-case access stays inside the value: accepted.
		 */
		"helper access to adjusted map (via variable): full range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
				    offsetof(struct test_val, foo), 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2,
				      sizeof(struct test_val) -
				      offsetof(struct test_val, foo)),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Bounded variable offset with a smaller size (8): accepted. */
		"helper access to adjusted map (via variable): partial range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
				    offsetof(struct test_val, foo), 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2, 8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Bounded variable offset but zero size: rejected. */
		"helper access to adjusted map (via variable): empty range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
				    offsetof(struct test_val, foo), 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "R1 min value is outside of the array range",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* The loaded offset is never bounds-checked before being
		 * added to the value pointer: rejected.
		 */
		"helper access to adjusted map (via variable): no max check",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Bound exists but size is one byte too large for the
		 * worst-case offset (off=4, size=45 > 48-4): rejected.
		 */
		"helper access to adjusted map (via variable): wrong max check",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
				    offsetof(struct test_val, foo), 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2,
				      sizeof(struct test_val) -
				      offsetof(struct test_val, foo) + 1),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Spill the map value pointer to the stack (fp-184), fill it
		 * back into R3 and store through it: pointer type must be
		 * preserved across the 64-bit spill/fill.  Unprivileged
		 * loads are rejected because the spill leaks a kernel addr.
		 */
		"map element value is preserved across register spilling",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 leaks addr",
		.result = ACCEPT,
		.result_unpriv = REJECT,
	},
	{
		/* Same spill/fill test after adjusting the pointer by
		 * offsetof(foo): still valid when privileged; unprivileged
		 * pointer arithmetic on R0 is prohibited.
		 */
		"map element value (adjusted) is preserved across register spilling",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
				      offsetof(struct test_val, foo)),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.result = ACCEPT,
		.result_unpriv = REJECT,
	},
	{
		/* Variable-size stack access: AND with 64 caps the size at
		 * 64, the JGE skips the call when the size is zero, and all
		 * 64 bytes at fp-64 are initialized first, so the
		 * probe_read is accepted.
		 */
		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_MOV64_IMM(BPF_REG_2, 16),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Without the JGE guard the bounded size still includes
		 * zero, which is rejected for this stack buffer.
		 */
		"helper access to variable memory: stack, bitwise AND, zero included",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
			BPF_MOV64_IMM(BPF_REG_2, 16),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack type R1 off=-64 access_size=0",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* AND with 65 allows a maximum size of 65 > the 64-byte
		 * buffer, so the call is rejected despite the zero guard.
		 */
		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
			BPF_MOV64_IMM(BPF_REG_2, 16),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack type R1 off=-64 access_size=65",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
  4039. {
  4040. "helper access to variable memory: stack, JMP, correct bounds",
  4041. .insns = {
  4042. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  4043. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  4044. BPF_MOV64_IMM(BPF_REG_0, 0),
  4045. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
  4046. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
  4047. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
  4048. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
  4049. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
  4050. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
  4051. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
  4052. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  4053. BPF_MOV64_IMM(BPF_REG_2, 16),
  4054. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  4055. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  4056. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
  4057. BPF_MOV64_IMM(BPF_REG_4, 0),
  4058. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  4059. BPF_MOV64_IMM(BPF_REG_3, 0),
  4060. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4061. BPF_MOV64_IMM(BPF_REG_0, 0),
  4062. BPF_EXIT_INSN(),
  4063. },
  4064. .result = ACCEPT,
  4065. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4066. },
  4067. {
  4068. "helper access to variable memory: stack, JMP (signed), correct bounds",
  4069. .insns = {
  4070. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  4071. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  4072. BPF_MOV64_IMM(BPF_REG_0, 0),
  4073. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
  4074. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
  4075. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
  4076. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
  4077. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
  4078. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
  4079. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
  4080. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  4081. BPF_MOV64_IMM(BPF_REG_2, 16),
  4082. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  4083. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  4084. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
  4085. BPF_MOV64_IMM(BPF_REG_4, 0),
  4086. BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
  4087. BPF_MOV64_IMM(BPF_REG_3, 0),
  4088. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4089. BPF_MOV64_IMM(BPF_REG_0, 0),
  4090. BPF_EXIT_INSN(),
  4091. },
  4092. .result = ACCEPT,
  4093. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4094. },
  4095. {
  4096. "helper access to variable memory: stack, JMP, bounds + offset",
  4097. .insns = {
  4098. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  4099. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  4100. BPF_MOV64_IMM(BPF_REG_2, 16),
  4101. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  4102. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  4103. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
  4104. BPF_MOV64_IMM(BPF_REG_4, 0),
  4105. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
  4106. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
  4107. BPF_MOV64_IMM(BPF_REG_3, 0),
  4108. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4109. BPF_MOV64_IMM(BPF_REG_0, 0),
  4110. BPF_EXIT_INSN(),
  4111. },
  4112. .errstr = "invalid stack type R1 off=-64 access_size=65",
  4113. .result = REJECT,
  4114. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4115. },
  4116. {
  4117. "helper access to variable memory: stack, JMP, wrong max",
  4118. .insns = {
  4119. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  4120. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  4121. BPF_MOV64_IMM(BPF_REG_2, 16),
  4122. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  4123. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  4124. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
  4125. BPF_MOV64_IMM(BPF_REG_4, 0),
  4126. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  4127. BPF_MOV64_IMM(BPF_REG_3, 0),
  4128. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4129. BPF_MOV64_IMM(BPF_REG_0, 0),
  4130. BPF_EXIT_INSN(),
  4131. },
  4132. .errstr = "invalid stack type R1 off=-64 access_size=65",
  4133. .result = REJECT,
  4134. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4135. },
  4136. {
  4137. "helper access to variable memory: stack, JMP, no max check",
  4138. .insns = {
  4139. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  4140. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  4141. BPF_MOV64_IMM(BPF_REG_2, 16),
  4142. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  4143. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  4144. BPF_MOV64_IMM(BPF_REG_4, 0),
  4145. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  4146. BPF_MOV64_IMM(BPF_REG_3, 0),
  4147. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4148. BPF_MOV64_IMM(BPF_REG_0, 0),
  4149. BPF_EXIT_INSN(),
  4150. },
  4151. .errstr = "R2 unbounded memory access",
  4152. .result = REJECT,
  4153. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4154. },
  4155. {
  4156. "helper access to variable memory: stack, JMP, no min check",
  4157. .insns = {
  4158. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  4159. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  4160. BPF_MOV64_IMM(BPF_REG_2, 16),
  4161. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  4162. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  4163. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
  4164. BPF_MOV64_IMM(BPF_REG_3, 0),
  4165. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4166. BPF_MOV64_IMM(BPF_REG_0, 0),
  4167. BPF_EXIT_INSN(),
  4168. },
  4169. .errstr = "invalid stack type R1 off=-64 access_size=0",
  4170. .result = REJECT,
  4171. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4172. },
  4173. {
  4174. "helper access to variable memory: stack, JMP (signed), no min check",
  4175. .insns = {
  4176. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  4177. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  4178. BPF_MOV64_IMM(BPF_REG_2, 16),
  4179. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  4180. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  4181. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
  4182. BPF_MOV64_IMM(BPF_REG_3, 0),
  4183. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4184. BPF_MOV64_IMM(BPF_REG_0, 0),
  4185. BPF_EXIT_INSN(),
  4186. },
  4187. .errstr = "R2 min value is negative",
  4188. .result = REJECT,
  4189. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4190. },
  4191. {
  4192. "helper access to variable memory: map, JMP, correct bounds",
  4193. .insns = {
  4194. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4195. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4196. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  4197. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4198. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  4199. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
  4200. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  4201. BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
  4202. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  4203. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  4204. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
  4205. sizeof(struct test_val), 4),
  4206. BPF_MOV64_IMM(BPF_REG_4, 0),
  4207. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  4208. BPF_MOV64_IMM(BPF_REG_3, 0),
  4209. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4210. BPF_MOV64_IMM(BPF_REG_0, 0),
  4211. BPF_EXIT_INSN(),
  4212. },
  4213. .fixup_map2 = { 3 },
  4214. .result = ACCEPT,
  4215. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4216. },
  4217. {
  4218. "helper access to variable memory: map, JMP, wrong max",
  4219. .insns = {
  4220. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4221. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4222. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  4223. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4224. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  4225. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
  4226. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  4227. BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
  4228. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  4229. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  4230. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
  4231. sizeof(struct test_val) + 1, 4),
  4232. BPF_MOV64_IMM(BPF_REG_4, 0),
  4233. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  4234. BPF_MOV64_IMM(BPF_REG_3, 0),
  4235. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4236. BPF_MOV64_IMM(BPF_REG_0, 0),
  4237. BPF_EXIT_INSN(),
  4238. },
  4239. .fixup_map2 = { 3 },
  4240. .errstr = "invalid access to map value, value_size=48 off=0 size=49",
  4241. .result = REJECT,
  4242. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4243. },
  4244. {
  4245. "helper access to variable memory: map adjusted, JMP, correct bounds",
  4246. .insns = {
  4247. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4248. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4249. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  4250. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4251. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  4252. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
  4253. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  4254. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
  4255. BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
  4256. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  4257. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  4258. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
  4259. sizeof(struct test_val) - 20, 4),
  4260. BPF_MOV64_IMM(BPF_REG_4, 0),
  4261. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  4262. BPF_MOV64_IMM(BPF_REG_3, 0),
  4263. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4264. BPF_MOV64_IMM(BPF_REG_0, 0),
  4265. BPF_EXIT_INSN(),
  4266. },
  4267. .fixup_map2 = { 3 },
  4268. .result = ACCEPT,
  4269. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4270. },
  4271. {
  4272. "helper access to variable memory: map adjusted, JMP, wrong max",
  4273. .insns = {
  4274. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4275. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4276. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  4277. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4278. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  4279. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
  4280. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  4281. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
  4282. BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
  4283. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  4284. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  4285. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
  4286. sizeof(struct test_val) - 19, 4),
  4287. BPF_MOV64_IMM(BPF_REG_4, 0),
  4288. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  4289. BPF_MOV64_IMM(BPF_REG_3, 0),
  4290. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4291. BPF_MOV64_IMM(BPF_REG_0, 0),
  4292. BPF_EXIT_INSN(),
  4293. },
  4294. .fixup_map2 = { 3 },
  4295. .errstr = "R1 min value is outside of the array range",
  4296. .result = REJECT,
  4297. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4298. },
  4299. {
  4300. "helper access to variable memory: size > 0 not allowed on NULL",
  4301. .insns = {
  4302. BPF_MOV64_IMM(BPF_REG_1, 0),
  4303. BPF_MOV64_IMM(BPF_REG_2, 0),
  4304. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  4305. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  4306. BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
  4307. BPF_MOV64_IMM(BPF_REG_3, 0),
  4308. BPF_MOV64_IMM(BPF_REG_4, 0),
  4309. BPF_MOV64_IMM(BPF_REG_5, 0),
  4310. BPF_EMIT_CALL(BPF_FUNC_csum_diff),
  4311. BPF_EXIT_INSN(),
  4312. },
  4313. .errstr = "R1 type=imm expected=fp",
  4314. .result = REJECT,
  4315. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4316. },
  4317. {
  4318. "helper access to variable memory: size = 0 not allowed on != NULL",
  4319. .insns = {
  4320. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  4321. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  4322. BPF_MOV64_IMM(BPF_REG_2, 0),
  4323. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
  4324. BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
  4325. BPF_MOV64_IMM(BPF_REG_3, 0),
  4326. BPF_MOV64_IMM(BPF_REG_4, 0),
  4327. BPF_MOV64_IMM(BPF_REG_5, 0),
  4328. BPF_EMIT_CALL(BPF_FUNC_csum_diff),
  4329. BPF_EXIT_INSN(),
  4330. },
  4331. .errstr = "invalid stack type R1 off=-8 access_size=0",
  4332. .result = REJECT,
  4333. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4334. },
  4335. {
  4336. "helper access to variable memory: 8 bytes leak",
  4337. .insns = {
  4338. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  4339. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  4340. BPF_MOV64_IMM(BPF_REG_0, 0),
  4341. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
  4342. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
  4343. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
  4344. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
  4345. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
  4346. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
  4347. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  4348. BPF_MOV64_IMM(BPF_REG_2, 0),
  4349. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  4350. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  4351. BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
  4352. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
  4353. BPF_MOV64_IMM(BPF_REG_3, 0),
  4354. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4355. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
  4356. BPF_EXIT_INSN(),
  4357. },
  4358. .errstr = "invalid indirect read from stack off -64+32 size 64",
  4359. .result = REJECT,
  4360. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4361. },
  4362. {
  4363. "helper access to variable memory: 8 bytes no leak (init memory)",
  4364. .insns = {
  4365. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  4366. BPF_MOV64_IMM(BPF_REG_0, 0),
  4367. BPF_MOV64_IMM(BPF_REG_0, 0),
  4368. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
  4369. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
  4370. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
  4371. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
  4372. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
  4373. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
  4374. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
  4375. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  4376. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  4377. BPF_MOV64_IMM(BPF_REG_2, 0),
  4378. BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
  4379. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
  4380. BPF_MOV64_IMM(BPF_REG_3, 0),
  4381. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  4382. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
  4383. BPF_EXIT_INSN(),
  4384. },
  4385. .result = ACCEPT,
  4386. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  4387. },
  4388. {
  4389. "invalid and of negative number",
  4390. .insns = {
  4391. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4392. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4393. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4394. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4395. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4396. BPF_FUNC_map_lookup_elem),
  4397. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  4398. BPF_MOV64_IMM(BPF_REG_1, 6),
  4399. BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
  4400. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  4401. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  4402. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  4403. offsetof(struct test_val, foo)),
  4404. BPF_EXIT_INSN(),
  4405. },
  4406. .fixup_map2 = { 3 },
  4407. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  4408. .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
  4409. .result = REJECT,
  4410. .result_unpriv = REJECT,
  4411. },
  4412. {
  4413. "invalid range check",
  4414. .insns = {
  4415. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4416. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4417. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4418. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4419. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4420. BPF_FUNC_map_lookup_elem),
  4421. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
  4422. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  4423. BPF_MOV64_IMM(BPF_REG_9, 1),
  4424. BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
  4425. BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
  4426. BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
  4427. BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
  4428. BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
  4429. BPF_MOV32_IMM(BPF_REG_3, 1),
  4430. BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
  4431. BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
  4432. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
  4433. BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
  4434. BPF_MOV64_REG(BPF_REG_0, 0),
  4435. BPF_EXIT_INSN(),
  4436. },
  4437. .fixup_map2 = { 3 },
  4438. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  4439. .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
  4440. .result = REJECT,
  4441. .result_unpriv = REJECT,
  4442. }
  4443. };
  4444. static int probe_filter_length(const struct bpf_insn *fp)
  4445. {
  4446. int len;
  4447. for (len = MAX_INSNS - 1; len > 0; --len)
  4448. if (fp[len].code != 0 || fp[len].imm != 0)
  4449. break;
  4450. return len + 1;
  4451. }
  4452. static int create_map(uint32_t size_value, uint32_t max_elem)
  4453. {
  4454. int fd;
  4455. fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
  4456. size_value, max_elem, BPF_F_NO_PREALLOC);
  4457. if (fd < 0)
  4458. printf("Failed to create hash map '%s'!\n", strerror(errno));
  4459. return fd;
  4460. }
  4461. static int create_prog_array(void)
  4462. {
  4463. int fd;
  4464. fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
  4465. sizeof(int), 4, 0);
  4466. if (fd < 0)
  4467. printf("Failed to create prog array '%s'!\n", strerror(errno));
  4468. return fd;
  4469. }
/* Buffer receiving the kernel verifier log from bpf_load_program();
 * dumped on test failure and searched for expected error strings.
 */
static char bpf_vlog[32768];
  4471. static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
  4472. int *fd_f1, int *fd_f2, int *fd_f3)
  4473. {
  4474. int *fixup_map1 = test->fixup_map1;
  4475. int *fixup_map2 = test->fixup_map2;
  4476. int *fixup_prog = test->fixup_prog;
  4477. /* Allocating HTs with 1 elem is fine here, since we only test
  4478. * for verifier and not do a runtime lookup, so the only thing
  4479. * that really matters is value size in this case.
  4480. */
  4481. if (*fixup_map1) {
  4482. *fd_f1 = create_map(sizeof(long long), 1);
  4483. do {
  4484. prog[*fixup_map1].imm = *fd_f1;
  4485. fixup_map1++;
  4486. } while (*fixup_map1);
  4487. }
  4488. if (*fixup_map2) {
  4489. *fd_f2 = create_map(sizeof(struct test_val), 1);
  4490. do {
  4491. prog[*fixup_map2].imm = *fd_f2;
  4492. fixup_map2++;
  4493. } while (*fixup_map2);
  4494. }
  4495. if (*fixup_prog) {
  4496. *fd_f3 = create_prog_array();
  4497. do {
  4498. prog[*fixup_prog].imm = *fd_f3;
  4499. fixup_prog++;
  4500. } while (*fixup_prog);
  4501. }
  4502. }
/* Load one test program and compare the outcome (and, for expected
 * rejections, the verifier log contents) against the expectations for
 * the current privilege level; bumps *passes or *errors accordingly.
 */
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	struct bpf_insn *prog = test->insns;
	int prog_len = probe_filter_length(prog);
	int prog_type = test->prog_type;
	int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
	int fd_prog, expected_ret;
	const char *expected_err;

	/* Patch map/prog-array fds into the instruction stream first. */
	do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);

	/* "?:" (GNU elvis operator): default to SOCKET_FILTER when the
	 * test does not specify a program type.
	 */
	fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				   prog, prog_len, "GPL", 0, bpf_vlog,
				   sizeof(bpf_vlog));

	/* Unprivileged-specific expectations override the defaults. */
	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;
	if (expected_ret == ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		/* NOTE(review): expected_err is assumed non-NULL for every
		 * REJECT test; strstr() would dereference NULL otherwise.
		 */
		if (!strstr(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n");
			goto fail_log;
		}
	}

	(*passes)++;
	printf("OK\n");
close_fds:
	/* Unused fds remain -1; close(-1) just fails harmlessly. */
	close(fd_prog);
	close(fd_f1);
	close(fd_f2);
	close(fd_f3);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}
  4550. static bool is_admin(void)
  4551. {
  4552. cap_t caps;
  4553. cap_flag_value_t sysadmin = CAP_CLEAR;
  4554. const cap_value_t cap_val = CAP_SYS_ADMIN;
  4555. if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
  4556. perror("cap_get_flag");
  4557. return false;
  4558. }
  4559. caps = cap_get_proc();
  4560. if (!caps) {
  4561. perror("cap_get_proc");
  4562. return false;
  4563. }
  4564. if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
  4565. perror("cap_get_flag");
  4566. if (cap_free(caps))
  4567. perror("cap_free");
  4568. return (sysadmin == CAP_SET);
  4569. }
  4570. static int set_admin(bool admin)
  4571. {
  4572. cap_t caps;
  4573. const cap_value_t cap_val = CAP_SYS_ADMIN;
  4574. int ret = -1;
  4575. caps = cap_get_proc();
  4576. if (!caps) {
  4577. perror("cap_get_proc");
  4578. return -1;
  4579. }
  4580. if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
  4581. admin ? CAP_SET : CAP_CLEAR)) {
  4582. perror("cap_set_flag");
  4583. goto out;
  4584. }
  4585. if (cap_set_proc(caps)) {
  4586. perror("cap_set_proc");
  4587. goto out;
  4588. }
  4589. ret = 0;
  4590. out:
  4591. if (cap_free(caps))
  4592. perror("cap_free");
  4593. return ret;
  4594. }
  4595. static int do_test(bool unpriv, unsigned int from, unsigned int to)
  4596. {
  4597. int i, passes = 0, errors = 0;
  4598. for (i = from; i < to; i++) {
  4599. struct bpf_test *test = &tests[i];
  4600. /* Program types that are not supported by non-root we
  4601. * skip right away.
  4602. */
  4603. if (!test->prog_type) {
  4604. if (!unpriv)
  4605. set_admin(false);
  4606. printf("#%d/u %s ", i, test->descr);
  4607. do_test_single(test, true, &passes, &errors);
  4608. if (!unpriv)
  4609. set_admin(true);
  4610. }
  4611. if (!unpriv) {
  4612. printf("#%d/p %s ", i, test->descr);
  4613. do_test_single(test, false, &passes, &errors);
  4614. }
  4615. }
  4616. printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
  4617. return errors ? -errors : 0;
  4618. }
  4619. int main(int argc, char **argv)
  4620. {
  4621. struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
  4622. struct rlimit rlim = { 1 << 20, 1 << 20 };
  4623. unsigned int from = 0, to = ARRAY_SIZE(tests);
  4624. bool unpriv = !is_admin();
  4625. if (argc == 3) {
  4626. unsigned int l = atoi(argv[argc - 2]);
  4627. unsigned int u = atoi(argv[argc - 1]);
  4628. if (l < to && u < to) {
  4629. from = l;
  4630. to = u + 1;
  4631. }
  4632. } else if (argc == 2) {
  4633. unsigned int t = atoi(argv[argc - 1]);
  4634. if (t < to) {
  4635. from = t;
  4636. to = t + 1;
  4637. }
  4638. }
  4639. setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
  4640. return do_test(unpriv, from, to);
  4641. }