test_verifier.c 432 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
18221182311824118251182611827118281182911830118311183211833118341183511836118371183811839118401184111842118431184411845118461184711848118491185011851118521185311854118551185611857118581185911860118611186211863118641186511866118671186811869118701187111872118731187411875118761187711878118791188011881118821188311884118851188611887118881188911890118911189211893118941189511896118971189811899119001190111902119031190411905119061190711908119091191011911119121191311914119151191611917119181191911920119211192211923119241192511926119271192811929119301193111932119331193411935119361193711938119391194011941119421194311944119451194611947119481194911950119511195211953119541195511956119571195811959119601196111962119631196411965119661196711968119691197011971119721197311974119751197611977119781197911980119811198211983119841198511986119871198811989119901199111992119931199411995119961199711998119991200012001120021200312004120051200612007120081200912010120111201212013120141201512016120171201812019120201202112022120231202412025120261202712028120291203012031120321203312034120351203612037120381203912040120411204212043120441204512046120471204812049120501205112052120531205412055120561205712058120591206012061120621206312064120651206612067120681206912070120711207212073120741207512076120771207812079120801208112082120831208412085120861208712088120891209012091120921209312094120951209612097120981209912100121011210212103121041210512106121071210812109121101211112112121131211412115121161211712118121191212012121121221212312124121251212612127121281212912130121311213212133121341213512136121371213812139121401214112142121431214412145121461214712148121491215012151121521215312154121551215612157121581215912160121611216212163121641216512166121671216812169121701217112172121731217412175121761217712178121791218012181121821218312184121851218612187121881218912190121911219212193121941219512196121971219812199122001220112202122031220412205122061220712208122091221012211122121221312214122151221612217122181221912220122211
22221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211
26221262312624126251262612627126281262912630126311263212633126341263512636126371263812639126401264112642126431264412645126461264712648126491265012651126521265312654126551265612657126581265912660126611266212663126641266512666126671266812669126701267112672126731267412675126761267712678126791268012681126821268312684126851268612687126881268912690126911269212693126941269512696126971269812699127001270112702127031270412705127061270712708127091271012711127121271312714127151271612717127181271912720127211272212723127241272512726127271272812729127301273112732127331273412735127361273712738127391274012741127421274312744127451274612747127481274912750127511275212753127541275512756127571275812759127601276112762127631276412765127661276712768127691277012771127721277312774127751277612777127781277912780127811278212783127841278512786127871278812789127901279112792127931279412795127961279712798127991280012801128021280312804128051280612807128081280912810128111281212813128141281512816128171281812819128201282112822128231282412825128261282712828128291283012831128321283312834128351283612837128381283912840128411284212843128441284512846128471284812849128501285112852128531285412855128561285712858128591286012861128621286312864128651286612867128681286912870128711287212873128741287512876128771287812879128801288112882128831288412885128861288712888128891289012891128921289312894128951289612897128981289912900129011290212903129041290512906129071290812909129101291112912129131291412915129161291712918129191292012921129221292312924129251292612927129281292912930129311293212933129341293512936129371293812939129401294112942129431294412945129461294712948129491295012951129521295312954129551295612957129581295912960129611296212963129641296512966129671296812969129701297112972129731297412975129761297712978129791298012981129821298312984129851298612987129881298912990129911299212993129941299512996129971299812999130001300113002130031300413005130061300713008130091301013011130121301313014130151301613017130181301913020130211
30221302313024130251302613027130281302913030130311303213033130341303513036130371303813039130401304113042130431304413045130461304713048130491305013051130521305313054130551305613057130581305913060130611306213063130641306513066130671306813069130701307113072130731307413075130761307713078130791308013081130821308313084130851308613087130881308913090130911309213093130941309513096130971309813099131001310113102131031310413105131061310713108131091311013111131121311313114131151311613117131181311913120131211312213123131241312513126131271312813129131301313113132131331313413135131361313713138131391314013141131421314313144131451314613147131481314913150131511315213153131541315513156131571315813159131601316113162131631316413165131661316713168131691317013171131721317313174131751317613177131781317913180131811318213183131841318513186131871318813189131901319113192131931319413195131961319713198131991320013201132021320313204132051320613207132081320913210132111321213213132141321513216132171321813219132201322113222132231322413225132261322713228132291323013231132321323313234132351323613237132381323913240132411324213243132441324513246132471324813249132501325113252132531325413255132561325713258132591326013261132621326313264132651326613267132681326913270132711327213273132741327513276132771327813279132801328113282132831328413285132861328713288132891329013291132921329313294132951329613297132981329913300133011330213303133041330513306133071330813309133101331113312133131331413315133161331713318133191332013321133221332313324133251332613327133281332913330133311333213333133341333513336133371333813339133401334113342133431334413345133461334713348133491335013351133521335313354133551335613357133581335913360133611336213363133641336513366133671336813369133701337113372133731337413375133761337713378133791338013381133821338313384133851338613387133881338913390133911339213393133941339513396133971339813399134001340113402134031340413405134061340713408134091341013411134121341313414134151341613417134181341913420134211
34221342313424134251342613427134281342913430134311343213433134341343513436134371343813439134401344113442134431344413445134461344713448134491345013451134521345313454134551345613457134581345913460134611346213463134641346513466134671346813469134701347113472134731347413475134761347713478134791348013481134821348313484134851348613487134881348913490134911349213493134941349513496134971349813499135001350113502135031350413505135061350713508135091351013511135121351313514135151351613517135181351913520135211352213523135241352513526135271352813529135301353113532135331353413535135361353713538135391354013541135421354313544135451354613547135481354913550135511355213553135541355513556135571355813559135601356113562135631356413565135661356713568135691357013571135721357313574135751357613577135781357913580135811358213583135841358513586135871358813589135901359113592135931359413595135961359713598135991360013601136021360313604136051360613607136081360913610136111361213613136141361513616136171361813619136201362113622136231362413625136261362713628136291363013631136321363313634136351363613637136381363913640136411364213643136441364513646136471364813649136501365113652136531365413655136561365713658136591366013661136621366313664136651366613667136681366913670136711367213673136741367513676136771367813679136801368113682136831368413685136861368713688136891369013691136921369313694136951369613697136981369913700137011370213703137041370513706137071370813709137101371113712137131371413715137161371713718137191372013721137221372313724137251372613727137281372913730137311373213733137341373513736137371373813739137401374113742137431374413745137461374713748137491375013751137521375313754137551375613757137581375913760137611376213763137641376513766137671376813769137701377113772137731377413775137761377713778137791378013781137821378313784137851378613787137881378913790137911379213793137941379513796137971379813799138001380113802138031380413805138061380713808138091381013811138121381313814138151381613817138181381913820138211
38221382313824138251382613827138281382913830138311383213833138341383513836138371383813839138401384113842138431384413845138461384713848138491385013851138521385313854138551385613857138581385913860138611386213863138641386513866138671386813869138701387113872138731387413875138761387713878138791388013881138821388313884138851388613887138881388913890138911389213893138941389513896138971389813899139001390113902139031390413905139061390713908139091391013911139121391313914139151391613917139181391913920139211392213923139241392513926139271392813929139301393113932139331393413935139361393713938139391394013941139421394313944139451394613947139481394913950139511395213953139541395513956139571395813959139601396113962139631396413965139661396713968139691397013971139721397313974139751397613977139781397913980139811398213983139841398513986139871398813989139901399113992139931399413995139961399713998139991400014001140021400314004140051400614007140081400914010140111401214013140141401514016140171401814019140201402114022140231402414025140261402714028140291403014031140321403314034140351403614037140381403914040140411404214043140441404514046140471404814049140501405114052140531405414055140561405714058140591406014061140621406314064140651406614067140681406914070140711407214073140741407514076140771407814079140801408114082140831408414085140861408714088140891409014091140921409314094140951409614097140981409914100141011410214103141041410514106141071410814109141101411114112141131411414115141161411714118141191412014121141221412314124141251412614127141281412914130141311413214133141341413514136141371413814139141401414114142141431414414145141461414714148141491415014151141521415314154141551415614157141581415914160141611416214163141641416514166141671416814169141701417114172141731417414175141761417714178141791418014181141821418314184141851418614187141881418914190141911419214193141941419514196141971419814199142001420114202142031420414205142061420714208142091421014211142121421314214142151421614217142181421914220142211
422214223142241422514226142271422814229142301423114232142331423414235142361423714238142391424014241142421424314244142451424614247142481424914250142511425214253142541425514256142571425814259142601426114262142631426414265142661426714268142691427014271142721427314274142751427614277142781427914280142811428214283142841428514286142871428814289142901429114292142931429414295142961429714298142991430014301143021430314304143051430614307143081430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409
  1. /*
  2. * Testsuite for eBPF verifier
  3. *
  4. * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
  5. * Copyright (c) 2017 Facebook
  6. * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of version 2 of the GNU General Public
  10. * License as published by the Free Software Foundation.
  11. */
  12. #include <endian.h>
  13. #include <asm/types.h>
  14. #include <linux/types.h>
  15. #include <stdint.h>
  16. #include <stdio.h>
  17. #include <stdlib.h>
  18. #include <unistd.h>
  19. #include <errno.h>
  20. #include <string.h>
  21. #include <stddef.h>
  22. #include <stdbool.h>
  23. #include <sched.h>
  24. #include <limits.h>
  25. #include <sys/capability.h>
  26. #include <linux/unistd.h>
  27. #include <linux/filter.h>
  28. #include <linux/bpf_perf_event.h>
  29. #include <linux/bpf.h>
  30. #include <linux/if_ether.h>
  31. #include <bpf/bpf.h>
  32. #ifdef HAVE_GENHDR
  33. # include "autoconf.h"
  34. #else
  35. # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
  36. # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
  37. # endif
  38. #endif
  39. #include "bpf_rlimit.h"
  40. #include "bpf_rand.h"
  41. #include "bpf_util.h"
  42. #include "../../../include/linux/filter.h"
/* Upper bound on instructions per test program (kernel BPF_MAXINSNS). */
#define MAX_INSNS	BPF_MAXINSNS
/* Max entries in each per-test fixup index table. */
#define MAX_FIXUPS	8
/* Max distinct maps a single test may create. */
#define MAX_NR_MAPS	13
/* Sentinel value ("cafe 4 all", 0xcafe4a with ll suffix) — presumably used
 * to detect pointer leaks to unprivileged users; TODO confirm against the
 * test bodies (not visible in this chunk).
 */
#define POINTER_VALUE	0xcafe4all
/* Size of the per-test input data buffer, in bytes. */
#define TEST_DATA_LEN	64

/* Test flags (struct bpf_test::flags bit positions). */
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

/* sysctl knob controlling unprivileged bpf(); read at startup. */
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
/* One verifier test case: a program, expected verdicts/errors for both
 * privileged and unprivileged load, and fixup tables used to patch map
 * file descriptors into the instructions.
 */
struct bpf_test {
	const char *descr;			/* human-readable test name */
	struct bpf_insn insns[MAX_INSNS];	/* program under test */
	/* Fixup tables, one per map flavor; presumably each entry is the
	 * index of an instruction whose map-fd operand gets patched once
	 * the map is created — TODO confirm against the fixup loop (not
	 * visible in this chunk).
	 */
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	const char *errstr;		/* expected verifier error (privileged) */
	const char *errstr_unpriv;	/* expected error when run unprivileged */
	uint32_t retval, retval_unpriv;	/* expected program return values */
	enum {
		UNDEF,			/* unprivileged verdict falls back to .result */
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;	/* 0 means default type — TODO confirm */
	uint8_t flags;			/* F_* bits above */
	__u8 data[TEST_DATA_LEN];	/* input packet/data for the test run */
	/* Optional generator that fills insns[] programmatically (see the
	 * bpf_fill_* helpers below) instead of a static instruction list.
	 */
	void (*fill_helper)(struct bpf_test *self);
};
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

/* Map value layout used by the 48-byte map tests: one index word plus a
 * payload array (4 + 11*4 = 48 bytes).
 */
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};
/* Map value layout for the 16-byte map tests: two 8-byte fields. */
struct other_val {
	long long foo;
	long long bar;
};
  93. static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
  94. {
  95. /* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
  96. #define PUSH_CNT 51
  97. unsigned int len = BPF_MAXINSNS;
  98. struct bpf_insn *insn = self->insns;
  99. int i = 0, j, k = 0;
  100. insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
  101. loop:
  102. for (j = 0; j < PUSH_CNT; j++) {
  103. insn[i++] = BPF_LD_ABS(BPF_B, 0);
  104. insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
  105. i++;
  106. insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
  107. insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
  108. insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
  109. insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  110. BPF_FUNC_skb_vlan_push),
  111. insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
  112. i++;
  113. }
  114. for (j = 0; j < PUSH_CNT; j++) {
  115. insn[i++] = BPF_LD_ABS(BPF_B, 0);
  116. insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
  117. i++;
  118. insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
  119. insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  120. BPF_FUNC_skb_vlan_pop),
  121. insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
  122. i++;
  123. }
  124. if (++k < 5)
  125. goto loop;
  126. for (; i < len - 1; i++)
  127. insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
  128. insn[len - 1] = BPF_EXIT_INSN();
  129. }
  130. static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
  131. {
  132. struct bpf_insn *insn = self->insns;
  133. unsigned int len = BPF_MAXINSNS;
  134. int i = 0;
  135. insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
  136. insn[i++] = BPF_LD_ABS(BPF_B, 0);
  137. insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
  138. i++;
  139. while (i < len - 1)
  140. insn[i++] = BPF_LD_ABS(BPF_B, 1);
  141. insn[i] = BPF_EXIT_INSN();
  142. }
  143. static void bpf_fill_rand_ld_dw(struct bpf_test *self)
  144. {
  145. struct bpf_insn *insn = self->insns;
  146. uint64_t res = 0;
  147. int i = 0;
  148. insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
  149. while (i < self->retval) {
  150. uint64_t val = bpf_semi_rand_get();
  151. struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
  152. res ^= val;
  153. insn[i++] = tmp[0];
  154. insn[i++] = tmp[1];
  155. insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
  156. }
  157. insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
  158. insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
  159. insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
  160. insn[i] = BPF_EXIT_INSN();
  161. res ^= (res >> 32);
  162. self->retval = (uint32_t)res;
  163. }
/* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps
 * (i.e. fixup indices in tests using this macro must account for the
 * 13-slot expansion).
 */
#define BPF_SK_LOOKUP							\
	/* struct bpf_sock_tuple tuple = {} */				\
	BPF_MOV64_IMM(BPF_REG_2, 0),					\
	/* zero the stack bytes [fp-48, fp-4) backing the tuple */	\
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
	BPF_MOV64_IMM(BPF_REG_4, 0),					\
	BPF_MOV64_IMM(BPF_REG_5, 0),					\
	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
  181. static struct bpf_test tests[] = {
  182. {
  183. "add+sub+mul",
  184. .insns = {
  185. BPF_MOV64_IMM(BPF_REG_1, 1),
  186. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
  187. BPF_MOV64_IMM(BPF_REG_2, 3),
  188. BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
  189. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
  190. BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
  191. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  192. BPF_EXIT_INSN(),
  193. },
  194. .result = ACCEPT,
  195. .retval = -3,
  196. },
  197. {
  198. "DIV32 by 0, zero check 1",
  199. .insns = {
  200. BPF_MOV32_IMM(BPF_REG_0, 42),
  201. BPF_MOV32_IMM(BPF_REG_1, 0),
  202. BPF_MOV32_IMM(BPF_REG_2, 1),
  203. BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
  204. BPF_EXIT_INSN(),
  205. },
  206. .result = ACCEPT,
  207. .retval = 42,
  208. },
  209. {
  210. "DIV32 by 0, zero check 2",
  211. .insns = {
  212. BPF_MOV32_IMM(BPF_REG_0, 42),
  213. BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
  214. BPF_MOV32_IMM(BPF_REG_2, 1),
  215. BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
  216. BPF_EXIT_INSN(),
  217. },
  218. .result = ACCEPT,
  219. .retval = 42,
  220. },
  221. {
  222. "DIV64 by 0, zero check",
  223. .insns = {
  224. BPF_MOV32_IMM(BPF_REG_0, 42),
  225. BPF_MOV32_IMM(BPF_REG_1, 0),
  226. BPF_MOV32_IMM(BPF_REG_2, 1),
  227. BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
  228. BPF_EXIT_INSN(),
  229. },
  230. .result = ACCEPT,
  231. .retval = 42,
  232. },
  233. {
  234. "MOD32 by 0, zero check 1",
  235. .insns = {
  236. BPF_MOV32_IMM(BPF_REG_0, 42),
  237. BPF_MOV32_IMM(BPF_REG_1, 0),
  238. BPF_MOV32_IMM(BPF_REG_2, 1),
  239. BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
  240. BPF_EXIT_INSN(),
  241. },
  242. .result = ACCEPT,
  243. .retval = 42,
  244. },
  245. {
  246. "MOD32 by 0, zero check 2",
  247. .insns = {
  248. BPF_MOV32_IMM(BPF_REG_0, 42),
  249. BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
  250. BPF_MOV32_IMM(BPF_REG_2, 1),
  251. BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
  252. BPF_EXIT_INSN(),
  253. },
  254. .result = ACCEPT,
  255. .retval = 42,
  256. },
  257. {
  258. "MOD64 by 0, zero check",
  259. .insns = {
  260. BPF_MOV32_IMM(BPF_REG_0, 42),
  261. BPF_MOV32_IMM(BPF_REG_1, 0),
  262. BPF_MOV32_IMM(BPF_REG_2, 1),
  263. BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
  264. BPF_EXIT_INSN(),
  265. },
  266. .result = ACCEPT,
  267. .retval = 42,
  268. },
  269. {
  270. "DIV32 by 0, zero check ok, cls",
  271. .insns = {
  272. BPF_MOV32_IMM(BPF_REG_0, 42),
  273. BPF_MOV32_IMM(BPF_REG_1, 2),
  274. BPF_MOV32_IMM(BPF_REG_2, 16),
  275. BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
  276. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  277. BPF_EXIT_INSN(),
  278. },
  279. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  280. .result = ACCEPT,
  281. .retval = 8,
  282. },
  283. {
  284. "DIV32 by 0, zero check 1, cls",
  285. .insns = {
  286. BPF_MOV32_IMM(BPF_REG_1, 0),
  287. BPF_MOV32_IMM(BPF_REG_0, 1),
  288. BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
  289. BPF_EXIT_INSN(),
  290. },
  291. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  292. .result = ACCEPT,
  293. .retval = 0,
  294. },
  295. {
  296. "DIV32 by 0, zero check 2, cls",
  297. .insns = {
  298. BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
  299. BPF_MOV32_IMM(BPF_REG_0, 1),
  300. BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
  301. BPF_EXIT_INSN(),
  302. },
  303. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  304. .result = ACCEPT,
  305. .retval = 0,
  306. },
  307. {
  308. "DIV64 by 0, zero check, cls",
  309. .insns = {
  310. BPF_MOV32_IMM(BPF_REG_1, 0),
  311. BPF_MOV32_IMM(BPF_REG_0, 1),
  312. BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
  313. BPF_EXIT_INSN(),
  314. },
  315. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  316. .result = ACCEPT,
  317. .retval = 0,
  318. },
  319. {
  320. "MOD32 by 0, zero check ok, cls",
  321. .insns = {
  322. BPF_MOV32_IMM(BPF_REG_0, 42),
  323. BPF_MOV32_IMM(BPF_REG_1, 3),
  324. BPF_MOV32_IMM(BPF_REG_2, 5),
  325. BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
  326. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  327. BPF_EXIT_INSN(),
  328. },
  329. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  330. .result = ACCEPT,
  331. .retval = 2,
  332. },
  333. {
  334. "MOD32 by 0, zero check 1, cls",
  335. .insns = {
  336. BPF_MOV32_IMM(BPF_REG_1, 0),
  337. BPF_MOV32_IMM(BPF_REG_0, 1),
  338. BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
  339. BPF_EXIT_INSN(),
  340. },
  341. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  342. .result = ACCEPT,
  343. .retval = 1,
  344. },
  345. {
  346. "MOD32 by 0, zero check 2, cls",
  347. .insns = {
  348. BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
  349. BPF_MOV32_IMM(BPF_REG_0, 1),
  350. BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
  351. BPF_EXIT_INSN(),
  352. },
  353. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  354. .result = ACCEPT,
  355. .retval = 1,
  356. },
  357. {
  358. "MOD64 by 0, zero check 1, cls",
  359. .insns = {
  360. BPF_MOV32_IMM(BPF_REG_1, 0),
  361. BPF_MOV32_IMM(BPF_REG_0, 2),
  362. BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
  363. BPF_EXIT_INSN(),
  364. },
  365. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  366. .result = ACCEPT,
  367. .retval = 2,
  368. },
  369. {
  370. "MOD64 by 0, zero check 2, cls",
  371. .insns = {
  372. BPF_MOV32_IMM(BPF_REG_1, 0),
  373. BPF_MOV32_IMM(BPF_REG_0, -1),
  374. BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
  375. BPF_EXIT_INSN(),
  376. },
  377. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  378. .result = ACCEPT,
  379. .retval = -1,
  380. },
  381. /* Just make sure that JITs used udiv/umod as otherwise we get
  382. * an exception from INT_MIN/-1 overflow similarly as with div
  383. * by zero.
  384. */
  385. {
  386. "DIV32 overflow, check 1",
  387. .insns = {
  388. BPF_MOV32_IMM(BPF_REG_1, -1),
  389. BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
  390. BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
  391. BPF_EXIT_INSN(),
  392. },
  393. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  394. .result = ACCEPT,
  395. .retval = 0,
  396. },
  397. {
  398. "DIV32 overflow, check 2",
  399. .insns = {
  400. BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
  401. BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
  402. BPF_EXIT_INSN(),
  403. },
  404. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  405. .result = ACCEPT,
  406. .retval = 0,
  407. },
  408. {
  409. "DIV64 overflow, check 1",
  410. .insns = {
  411. BPF_MOV64_IMM(BPF_REG_1, -1),
  412. BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
  413. BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
  414. BPF_EXIT_INSN(),
  415. },
  416. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  417. .result = ACCEPT,
  418. .retval = 0,
  419. },
  420. {
  421. "DIV64 overflow, check 2",
  422. .insns = {
  423. BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
  424. BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
  425. BPF_EXIT_INSN(),
  426. },
  427. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  428. .result = ACCEPT,
  429. .retval = 0,
  430. },
  431. {
  432. "MOD32 overflow, check 1",
  433. .insns = {
  434. BPF_MOV32_IMM(BPF_REG_1, -1),
  435. BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
  436. BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
  437. BPF_EXIT_INSN(),
  438. },
  439. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  440. .result = ACCEPT,
  441. .retval = INT_MIN,
  442. },
  443. {
  444. "MOD32 overflow, check 2",
  445. .insns = {
  446. BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
  447. BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
  448. BPF_EXIT_INSN(),
  449. },
  450. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  451. .result = ACCEPT,
  452. .retval = INT_MIN,
  453. },
  454. {
  455. "MOD64 overflow, check 1",
  456. .insns = {
  457. BPF_MOV64_IMM(BPF_REG_1, -1),
  458. BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
  459. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  460. BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
  461. BPF_MOV32_IMM(BPF_REG_0, 0),
  462. BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
  463. BPF_MOV32_IMM(BPF_REG_0, 1),
  464. BPF_EXIT_INSN(),
  465. },
  466. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  467. .result = ACCEPT,
  468. .retval = 1,
  469. },
  470. {
  471. "MOD64 overflow, check 2",
  472. .insns = {
  473. BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
  474. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  475. BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
  476. BPF_MOV32_IMM(BPF_REG_0, 0),
  477. BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
  478. BPF_MOV32_IMM(BPF_REG_0, 1),
  479. BPF_EXIT_INSN(),
  480. },
  481. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  482. .result = ACCEPT,
  483. .retval = 1,
  484. },
  485. {
  486. "xor32 zero extend check",
  487. .insns = {
  488. BPF_MOV32_IMM(BPF_REG_2, -1),
  489. BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
  490. BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
  491. BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
  492. BPF_MOV32_IMM(BPF_REG_0, 2),
  493. BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
  494. BPF_MOV32_IMM(BPF_REG_0, 1),
  495. BPF_EXIT_INSN(),
  496. },
  497. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  498. .result = ACCEPT,
  499. .retval = 1,
  500. },
  501. {
  502. "empty prog",
  503. .insns = {
  504. },
  505. .errstr = "unknown opcode 00",
  506. .result = REJECT,
  507. },
  508. {
  509. "only exit insn",
  510. .insns = {
  511. BPF_EXIT_INSN(),
  512. },
  513. .errstr = "R0 !read_ok",
  514. .result = REJECT,
  515. },
  516. {
  517. "unreachable",
  518. .insns = {
  519. BPF_EXIT_INSN(),
  520. BPF_EXIT_INSN(),
  521. },
  522. .errstr = "unreachable",
  523. .result = REJECT,
  524. },
  525. {
  526. "unreachable2",
  527. .insns = {
  528. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  529. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  530. BPF_EXIT_INSN(),
  531. },
  532. .errstr = "unreachable",
  533. .result = REJECT,
  534. },
  535. {
  536. "out of range jump",
  537. .insns = {
  538. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  539. BPF_EXIT_INSN(),
  540. },
  541. .errstr = "jump out of range",
  542. .result = REJECT,
  543. },
  544. {
  545. "out of range jump2",
  546. .insns = {
  547. BPF_JMP_IMM(BPF_JA, 0, 0, -2),
  548. BPF_EXIT_INSN(),
  549. },
  550. .errstr = "jump out of range",
  551. .result = REJECT,
  552. },
  553. {
  554. "test1 ld_imm64",
  555. .insns = {
  556. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  557. BPF_LD_IMM64(BPF_REG_0, 0),
  558. BPF_LD_IMM64(BPF_REG_0, 0),
  559. BPF_LD_IMM64(BPF_REG_0, 1),
  560. BPF_LD_IMM64(BPF_REG_0, 1),
  561. BPF_MOV64_IMM(BPF_REG_0, 2),
  562. BPF_EXIT_INSN(),
  563. },
  564. .errstr = "invalid BPF_LD_IMM insn",
  565. .errstr_unpriv = "R1 pointer comparison",
  566. .result = REJECT,
  567. },
  568. {
  569. "test2 ld_imm64",
  570. .insns = {
  571. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  572. BPF_LD_IMM64(BPF_REG_0, 0),
  573. BPF_LD_IMM64(BPF_REG_0, 0),
  574. BPF_LD_IMM64(BPF_REG_0, 1),
  575. BPF_LD_IMM64(BPF_REG_0, 1),
  576. BPF_EXIT_INSN(),
  577. },
  578. .errstr = "invalid BPF_LD_IMM insn",
  579. .errstr_unpriv = "R1 pointer comparison",
  580. .result = REJECT,
  581. },
  582. {
  583. "test3 ld_imm64",
  584. .insns = {
  585. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  586. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  587. BPF_LD_IMM64(BPF_REG_0, 0),
  588. BPF_LD_IMM64(BPF_REG_0, 0),
  589. BPF_LD_IMM64(BPF_REG_0, 1),
  590. BPF_LD_IMM64(BPF_REG_0, 1),
  591. BPF_EXIT_INSN(),
  592. },
  593. .errstr = "invalid bpf_ld_imm64 insn",
  594. .result = REJECT,
  595. },
  596. {
  597. "test4 ld_imm64",
  598. .insns = {
  599. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  600. BPF_EXIT_INSN(),
  601. },
  602. .errstr = "invalid bpf_ld_imm64 insn",
  603. .result = REJECT,
  604. },
  605. {
  606. "test5 ld_imm64",
  607. .insns = {
  608. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  609. },
  610. .errstr = "invalid bpf_ld_imm64 insn",
  611. .result = REJECT,
  612. },
  613. {
  614. "test6 ld_imm64",
  615. .insns = {
  616. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
  617. BPF_RAW_INSN(0, 0, 0, 0, 0),
  618. BPF_EXIT_INSN(),
  619. },
  620. .result = ACCEPT,
  621. },
  622. {
  623. "test7 ld_imm64",
  624. .insns = {
  625. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
  626. BPF_RAW_INSN(0, 0, 0, 0, 1),
  627. BPF_EXIT_INSN(),
  628. },
  629. .result = ACCEPT,
  630. .retval = 1,
  631. },
  632. {
  633. "test8 ld_imm64",
  634. .insns = {
  635. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
  636. BPF_RAW_INSN(0, 0, 0, 0, 1),
  637. BPF_EXIT_INSN(),
  638. },
  639. .errstr = "uses reserved fields",
  640. .result = REJECT,
  641. },
  642. {
  643. "test9 ld_imm64",
  644. .insns = {
  645. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
  646. BPF_RAW_INSN(0, 0, 0, 1, 1),
  647. BPF_EXIT_INSN(),
  648. },
  649. .errstr = "invalid bpf_ld_imm64 insn",
  650. .result = REJECT,
  651. },
  652. {
  653. "test10 ld_imm64",
  654. .insns = {
  655. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
  656. BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
  657. BPF_EXIT_INSN(),
  658. },
  659. .errstr = "invalid bpf_ld_imm64 insn",
  660. .result = REJECT,
  661. },
  662. {
  663. "test11 ld_imm64",
  664. .insns = {
  665. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
  666. BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
  667. BPF_EXIT_INSN(),
  668. },
  669. .errstr = "invalid bpf_ld_imm64 insn",
  670. .result = REJECT,
  671. },
  672. {
  673. "test12 ld_imm64",
  674. .insns = {
  675. BPF_MOV64_IMM(BPF_REG_1, 0),
  676. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
  677. BPF_RAW_INSN(0, 0, 0, 0, 1),
  678. BPF_EXIT_INSN(),
  679. },
  680. .errstr = "not pointing to valid bpf_map",
  681. .result = REJECT,
  682. },
  683. {
  684. "test13 ld_imm64",
  685. .insns = {
  686. BPF_MOV64_IMM(BPF_REG_1, 0),
  687. BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
  688. BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
  689. BPF_EXIT_INSN(),
  690. },
  691. .errstr = "invalid bpf_ld_imm64 insn",
  692. .result = REJECT,
  693. },
  694. {
  695. "arsh32 on imm",
  696. .insns = {
  697. BPF_MOV64_IMM(BPF_REG_0, 1),
  698. BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
  699. BPF_EXIT_INSN(),
  700. },
  701. .result = REJECT,
  702. .errstr = "unknown opcode c4",
  703. },
  704. {
  705. "arsh32 on reg",
  706. .insns = {
  707. BPF_MOV64_IMM(BPF_REG_0, 1),
  708. BPF_MOV64_IMM(BPF_REG_1, 5),
  709. BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
  710. BPF_EXIT_INSN(),
  711. },
  712. .result = REJECT,
  713. .errstr = "unknown opcode cc",
  714. },
  715. {
  716. "arsh64 on imm",
  717. .insns = {
  718. BPF_MOV64_IMM(BPF_REG_0, 1),
  719. BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
  720. BPF_EXIT_INSN(),
  721. },
  722. .result = ACCEPT,
  723. },
  724. {
  725. "arsh64 on reg",
  726. .insns = {
  727. BPF_MOV64_IMM(BPF_REG_0, 1),
  728. BPF_MOV64_IMM(BPF_REG_1, 5),
  729. BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
  730. BPF_EXIT_INSN(),
  731. },
  732. .result = ACCEPT,
  733. },
  734. {
  735. "no bpf_exit",
  736. .insns = {
  737. BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
  738. },
  739. .errstr = "not an exit",
  740. .result = REJECT,
  741. },
  742. {
  743. "loop (back-edge)",
  744. .insns = {
  745. BPF_JMP_IMM(BPF_JA, 0, 0, -1),
  746. BPF_EXIT_INSN(),
  747. },
  748. .errstr = "back-edge",
  749. .result = REJECT,
  750. },
  751. {
  752. "loop2 (back-edge)",
  753. .insns = {
  754. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  755. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  756. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  757. BPF_JMP_IMM(BPF_JA, 0, 0, -4),
  758. BPF_EXIT_INSN(),
  759. },
  760. .errstr = "back-edge",
  761. .result = REJECT,
  762. },
  763. {
  764. "conditional loop",
  765. .insns = {
  766. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  767. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  768. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  769. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
  770. BPF_EXIT_INSN(),
  771. },
  772. .errstr = "back-edge",
  773. .result = REJECT,
  774. },
  775. {
  776. "read uninitialized register",
  777. .insns = {
  778. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  779. BPF_EXIT_INSN(),
  780. },
  781. .errstr = "R2 !read_ok",
  782. .result = REJECT,
  783. },
  784. {
  785. "read invalid register",
  786. .insns = {
  787. BPF_MOV64_REG(BPF_REG_0, -1),
  788. BPF_EXIT_INSN(),
  789. },
  790. .errstr = "R15 is invalid",
  791. .result = REJECT,
  792. },
  793. {
  794. "program doesn't init R0 before exit",
  795. .insns = {
  796. BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
  797. BPF_EXIT_INSN(),
  798. },
  799. .errstr = "R0 !read_ok",
  800. .result = REJECT,
  801. },
  802. {
  803. "program doesn't init R0 before exit in all branches",
  804. .insns = {
  805. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  806. BPF_MOV64_IMM(BPF_REG_0, 1),
  807. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
  808. BPF_EXIT_INSN(),
  809. },
  810. .errstr = "R0 !read_ok",
  811. .errstr_unpriv = "R1 pointer comparison",
  812. .result = REJECT,
  813. },
  814. {
  815. "stack out of bounds",
  816. .insns = {
  817. BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
  818. BPF_EXIT_INSN(),
  819. },
  820. .errstr = "invalid stack",
  821. .result = REJECT,
  822. },
  823. {
  824. "invalid call insn1",
  825. .insns = {
  826. BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
  827. BPF_EXIT_INSN(),
  828. },
  829. .errstr = "unknown opcode 8d",
  830. .result = REJECT,
  831. },
  832. {
  833. "invalid call insn2",
  834. .insns = {
  835. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
  836. BPF_EXIT_INSN(),
  837. },
  838. .errstr = "BPF_CALL uses reserved",
  839. .result = REJECT,
  840. },
  841. {
  842. "invalid function call",
  843. .insns = {
  844. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
  845. BPF_EXIT_INSN(),
  846. },
  847. .errstr = "invalid func unknown#1234567",
  848. .result = REJECT,
  849. },
  850. {
  851. "uninitialized stack1",
  852. .insns = {
  853. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  854. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  855. BPF_LD_MAP_FD(BPF_REG_1, 0),
  856. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  857. BPF_FUNC_map_lookup_elem),
  858. BPF_EXIT_INSN(),
  859. },
  860. .fixup_map_hash_8b = { 2 },
  861. .errstr = "invalid indirect read from stack",
  862. .result = REJECT,
  863. },
  864. {
  865. "uninitialized stack2",
  866. .insns = {
  867. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  868. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
  869. BPF_EXIT_INSN(),
  870. },
  871. .errstr = "invalid read from stack",
  872. .result = REJECT,
  873. },
  874. {
  875. "invalid fp arithmetic",
  876. /* If this gets ever changed, make sure JITs can deal with it. */
  877. .insns = {
  878. BPF_MOV64_IMM(BPF_REG_0, 0),
  879. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  880. BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
  881. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
  882. BPF_EXIT_INSN(),
  883. },
  884. .errstr = "R1 subtraction from stack pointer",
  885. .result = REJECT,
  886. },
  887. {
  888. "non-invalid fp arithmetic",
  889. .insns = {
  890. BPF_MOV64_IMM(BPF_REG_0, 0),
  891. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  892. BPF_EXIT_INSN(),
  893. },
  894. .result = ACCEPT,
  895. },
  896. {
  897. "invalid argument register",
  898. .insns = {
  899. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  900. BPF_FUNC_get_cgroup_classid),
  901. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  902. BPF_FUNC_get_cgroup_classid),
  903. BPF_EXIT_INSN(),
  904. },
  905. .errstr = "R1 !read_ok",
  906. .result = REJECT,
  907. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  908. },
  909. {
  910. "non-invalid argument register",
  911. .insns = {
  912. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  913. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  914. BPF_FUNC_get_cgroup_classid),
  915. BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
  916. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  917. BPF_FUNC_get_cgroup_classid),
  918. BPF_EXIT_INSN(),
  919. },
  920. .result = ACCEPT,
  921. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  922. },
  923. {
  924. "check valid spill/fill",
  925. .insns = {
  926. /* spill R1(ctx) into stack */
  927. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  928. /* fill it back into R2 */
  929. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
  930. /* should be able to access R0 = *(R2 + 8) */
  931. /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
  932. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  933. BPF_EXIT_INSN(),
  934. },
  935. .errstr_unpriv = "R0 leaks addr",
  936. .result = ACCEPT,
  937. .result_unpriv = REJECT,
  938. .retval = POINTER_VALUE,
  939. },
  940. {
  941. "check valid spill/fill, skb mark",
  942. .insns = {
  943. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  944. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
  945. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  946. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  947. offsetof(struct __sk_buff, mark)),
  948. BPF_EXIT_INSN(),
  949. },
  950. .result = ACCEPT,
  951. .result_unpriv = ACCEPT,
  952. },
  953. {
  954. "check corrupted spill/fill",
  955. .insns = {
  956. /* spill R1(ctx) into stack */
  957. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  958. /* mess up with R1 pointer on stack */
  959. BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
  960. /* fill back into R0 should fail */
  961. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  962. BPF_EXIT_INSN(),
  963. },
  964. .errstr_unpriv = "attempt to corrupt spilled",
  965. .errstr = "corrupted spill",
  966. .result = REJECT,
  967. },
  968. {
  969. "invalid src register in STX",
  970. .insns = {
  971. BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
  972. BPF_EXIT_INSN(),
  973. },
  974. .errstr = "R15 is invalid",
  975. .result = REJECT,
  976. },
  977. {
  978. "invalid dst register in STX",
  979. .insns = {
  980. BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
  981. BPF_EXIT_INSN(),
  982. },
  983. .errstr = "R14 is invalid",
  984. .result = REJECT,
  985. },
  986. {
  987. "invalid dst register in ST",
  988. .insns = {
  989. BPF_ST_MEM(BPF_B, 14, -1, -1),
  990. BPF_EXIT_INSN(),
  991. },
  992. .errstr = "R14 is invalid",
  993. .result = REJECT,
  994. },
  995. {
  996. "invalid src register in LDX",
  997. .insns = {
  998. BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
  999. BPF_EXIT_INSN(),
  1000. },
  1001. .errstr = "R12 is invalid",
  1002. .result = REJECT,
  1003. },
  1004. {
  1005. "invalid dst register in LDX",
  1006. .insns = {
  1007. BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
  1008. BPF_EXIT_INSN(),
  1009. },
  1010. .errstr = "R11 is invalid",
  1011. .result = REJECT,
  1012. },
  1013. {
  1014. "junk insn",
  1015. .insns = {
  1016. BPF_RAW_INSN(0, 0, 0, 0, 0),
  1017. BPF_EXIT_INSN(),
  1018. },
  1019. .errstr = "unknown opcode 00",
  1020. .result = REJECT,
  1021. },
  1022. {
  1023. "junk insn2",
  1024. .insns = {
  1025. BPF_RAW_INSN(1, 0, 0, 0, 0),
  1026. BPF_EXIT_INSN(),
  1027. },
  1028. .errstr = "BPF_LDX uses reserved fields",
  1029. .result = REJECT,
  1030. },
  1031. {
  1032. "junk insn3",
  1033. .insns = {
  1034. BPF_RAW_INSN(-1, 0, 0, 0, 0),
  1035. BPF_EXIT_INSN(),
  1036. },
  1037. .errstr = "unknown opcode ff",
  1038. .result = REJECT,
  1039. },
  1040. {
  1041. "junk insn4",
  1042. .insns = {
  1043. BPF_RAW_INSN(-1, -1, -1, -1, -1),
  1044. BPF_EXIT_INSN(),
  1045. },
  1046. .errstr = "unknown opcode ff",
  1047. .result = REJECT,
  1048. },
  1049. {
  1050. "junk insn5",
  1051. .insns = {
  1052. BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
  1053. BPF_EXIT_INSN(),
  1054. },
  1055. .errstr = "BPF_ALU uses reserved fields",
  1056. .result = REJECT,
  1057. },
  1058. {
  1059. "misaligned read from stack",
  1060. .insns = {
  1061. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1062. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
  1063. BPF_EXIT_INSN(),
  1064. },
  1065. .errstr = "misaligned stack access",
  1066. .result = REJECT,
  1067. },
  1068. {
  1069. "invalid map_fd for function call",
  1070. .insns = {
  1071. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1072. BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
  1073. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1074. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1075. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1076. BPF_FUNC_map_delete_elem),
  1077. BPF_EXIT_INSN(),
  1078. },
  1079. .errstr = "fd 0 is not pointing to valid bpf_map",
  1080. .result = REJECT,
  1081. },
  1082. {
  1083. "don't check return value before access",
  1084. .insns = {
  1085. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1086. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1087. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1088. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1089. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1090. BPF_FUNC_map_lookup_elem),
  1091. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
  1092. BPF_EXIT_INSN(),
  1093. },
  1094. .fixup_map_hash_8b = { 3 },
  1095. .errstr = "R0 invalid mem access 'map_value_or_null'",
  1096. .result = REJECT,
  1097. },
  1098. {
  1099. "access memory with incorrect alignment",
  1100. .insns = {
  1101. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1102. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1103. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1104. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1105. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1106. BPF_FUNC_map_lookup_elem),
  1107. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  1108. BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
  1109. BPF_EXIT_INSN(),
  1110. },
  1111. .fixup_map_hash_8b = { 3 },
  1112. .errstr = "misaligned value access",
  1113. .result = REJECT,
  1114. .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
  1115. },
  1116. {
  1117. "sometimes access memory with incorrect alignment",
  1118. .insns = {
  1119. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1120. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1121. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1122. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1123. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1124. BPF_FUNC_map_lookup_elem),
  1125. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  1126. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
  1127. BPF_EXIT_INSN(),
  1128. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
  1129. BPF_EXIT_INSN(),
  1130. },
  1131. .fixup_map_hash_8b = { 3 },
  1132. .errstr = "R0 invalid mem access",
  1133. .errstr_unpriv = "R0 leaks addr",
  1134. .result = REJECT,
  1135. .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
  1136. },
  1137. {
  1138. "jump test 1",
  1139. .insns = {
  1140. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1141. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
  1142. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  1143. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  1144. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
  1145. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
  1146. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
  1147. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
  1148. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
  1149. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
  1150. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
  1151. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
  1152. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  1153. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
  1154. BPF_MOV64_IMM(BPF_REG_0, 0),
  1155. BPF_EXIT_INSN(),
  1156. },
  1157. .errstr_unpriv = "R1 pointer comparison",
  1158. .result_unpriv = REJECT,
  1159. .result = ACCEPT,
  1160. },
  /* "jump test 2": ladder of JEQ checks on R1; each taken arm stores to
   * the stack frame (via R2 = fp) and JAs to the common exit.  Accepted
   * when privileged; the unprivileged load is rejected because R1 (the
   * ctx pointer) is compared against a scalar immediate.
   */
  1161. {
  1162. "jump test 2",
  1163. .insns = {
  1164. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1165. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
  1166. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  1167. BPF_JMP_IMM(BPF_JA, 0, 0, 14),
  1168. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
  1169. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  1170. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  1171. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
  1172. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  1173. BPF_JMP_IMM(BPF_JA, 0, 0, 8),
  1174. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
  1175. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  1176. BPF_JMP_IMM(BPF_JA, 0, 0, 5),
  1177. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
  1178. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  1179. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  1180. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  1181. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  1182. BPF_MOV64_IMM(BPF_REG_0, 0),
  1183. BPF_EXIT_INSN(),
  1184. },
  1185. .errstr_unpriv = "R1 pointer comparison",
  1186. .result_unpriv = REJECT,
  1187. .result = ACCEPT,
  1188. },
  /* "jump test 3": like test 2, but each arm also rebases R2 to the slot
   * it wrote, and all arms fall through to map_delete_elem() with that
   * derived stack pointer as the key.  The key is never present, so the
   * program returns -ENOENT at runtime (.retval).
   */
  1189. {
  1190. "jump test 3",
  1191. .insns = {
  1192. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1193. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  1194. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  1195. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1196. BPF_JMP_IMM(BPF_JA, 0, 0, 19),
  1197. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
  1198. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  1199. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
  1200. BPF_JMP_IMM(BPF_JA, 0, 0, 15),
  1201. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
  1202. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  1203. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
  1204. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  1205. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
  1206. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  1207. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
  1208. BPF_JMP_IMM(BPF_JA, 0, 0, 7),
  1209. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
  1210. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  1211. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
  1212. BPF_JMP_IMM(BPF_JA, 0, 0, 3),
  1213. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
  1214. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  1215. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
  1216. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1217. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1218. BPF_FUNC_map_delete_elem),
  1219. BPF_EXIT_INSN(),
  1220. },
  1221. .fixup_map_hash_8b = { 24 },
  1222. .errstr_unpriv = "R1 pointer comparison",
  1223. .result_unpriv = REJECT,
  1224. .result = ACCEPT,
  1225. },
  1226. .retval = -ENOENT,
  1226. },
  /* "jump test 4": dense grid of short forward JEQ branches, stressing
   * the verifier's branch exploration/state pruning.  NOTE(review):
   * BPF_JMP_IMM takes an immediate, so BPF_REG_10 in the imm slot is
   * simply the constant 10 - presumably intentional (it still provokes
   * the unpriv "R1 pointer comparison" rejection); confirm vs upstream.
   */
  1227. {
  1228. "jump test 4",
  1229. .insns = {
  1230. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  1231. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  1232. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  1233. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  1234. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  1235. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  1236. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  1237. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  1238. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  1239. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  1240. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  1241. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  1242. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  1243. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  1244. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  1245. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  1246. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  1247. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  1248. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  1249. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  1250. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  1251. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  1252. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  1253. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  1254. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  1255. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  1256. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  1257. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  1258. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  1259. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  1260. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  1261. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  1262. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  1263. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  1264. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  1265. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  1266. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  1267. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  1268. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  1269. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  1270. BPF_MOV64_IMM(BPF_REG_0, 0),
  1271. BPF_EXIT_INSN(),
  1272. },
  1273. .errstr_unpriv = "R1 pointer comparison",
  1274. .result_unpriv = REJECT,
  1275. .result = ACCEPT,
  1276. },
  /* "jump test 5": five identical diamond-shaped branch blocks, each
   * storing a frame pointer (R2 or R3) to fp-8 on one of two paths; the
   * verifier must merge/prune the equivalent states.  Unpriv rejected
   * for the pointer-vs-immediate JGE on R1.
   */
  1277. {
  1278. "jump test 5",
  1279. .insns = {
  1280. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1281. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  1282. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  1283. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  1284. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  1285. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  1286. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  1287. BPF_MOV64_IMM(BPF_REG_0, 0),
  1288. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  1289. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  1290. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  1291. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  1292. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  1293. BPF_MOV64_IMM(BPF_REG_0, 0),
  1294. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  1295. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  1296. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  1297. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  1298. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  1299. BPF_MOV64_IMM(BPF_REG_0, 0),
  1300. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  1301. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  1302. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  1303. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  1304. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  1305. BPF_MOV64_IMM(BPF_REG_0, 0),
  1306. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  1307. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  1308. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  1309. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  1310. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  1311. BPF_MOV64_IMM(BPF_REG_0, 0),
  1312. BPF_EXIT_INSN(),
  1313. },
  1314. .errstr_unpriv = "R1 pointer comparison",
  1315. .result_unpriv = REJECT,
  1316. .result = ACCEPT,
  1317. },
  /* "access skb fields ok": word loads of the __sk_buff fields a plain
   * socket filter may read (len, mark, pkt_type, queue_mapping,
   * protocol, vlan_present, vlan_tci, napi_id), interleaved with short
   * or zero-offset JGE branches to fork verifier states.  ACCEPT.
   */
  1318. {
  1319. "access skb fields ok",
  1320. .insns = {
  1321. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1322. offsetof(struct __sk_buff, len)),
  1323. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  1324. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1325. offsetof(struct __sk_buff, mark)),
  1326. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  1327. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1328. offsetof(struct __sk_buff, pkt_type)),
  1329. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  1330. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1331. offsetof(struct __sk_buff, queue_mapping)),
  1332. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  1333. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1334. offsetof(struct __sk_buff, protocol)),
  1335. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  1336. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1337. offsetof(struct __sk_buff, vlan_present)),
  1338. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  1339. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1340. offsetof(struct __sk_buff, vlan_tci)),
  1341. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  1342. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1343. offsetof(struct __sk_buff, napi_id)),
  1344. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  1345. BPF_EXIT_INSN(),
  1346. },
  1347. .result = ACCEPT,
  1348. },
  /* "access skb fields bad1": negative offset into the ctx pointer is
   * never a valid __sk_buff field.
   */
  1349. {
  1350. "access skb fields bad1",
  1351. .insns = {
  1352. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
  1353. BPF_EXIT_INSN(),
  1354. },
  1355. .errstr = "invalid bpf_context access",
  1356. .result = REJECT,
  1357. },
  /* "access skb fields bad2"/"bad3"/"bad4": on one path R1 stays the ctx
   * pointer, on another it becomes a map-value pointer from
   * map_lookup_elem(); a ctx field load then sees two incompatible
   * pointer types, which the verifier rejects as "different pointers".
   * bad3/bad4 reach the load via backward JA jumps.
   */
  1358. {
  1359. "access skb fields bad2",
  1360. .insns = {
  1361. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
  1362. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1363. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1364. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1365. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1366. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1367. BPF_FUNC_map_lookup_elem),
  1368. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  1369. BPF_EXIT_INSN(),
  1370. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  1371. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1372. offsetof(struct __sk_buff, pkt_type)),
  1373. BPF_EXIT_INSN(),
  1374. },
  1375. .fixup_map_hash_8b = { 4 },
  1376. .errstr = "different pointers",
  1377. .errstr_unpriv = "R1 pointer comparison",
  1378. .result = REJECT,
  1379. },
  1380. {
  1381. "access skb fields bad3",
  1382. .insns = {
  1383. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  1384. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1385. offsetof(struct __sk_buff, pkt_type)),
  1386. BPF_EXIT_INSN(),
  1387. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1388. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1389. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1390. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1391. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1392. BPF_FUNC_map_lookup_elem),
  1393. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  1394. BPF_EXIT_INSN(),
  1395. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  1396. BPF_JMP_IMM(BPF_JA, 0, 0, -12),
  1397. },
  1398. .fixup_map_hash_8b = { 6 },
  1399. .errstr = "different pointers",
  1400. .errstr_unpriv = "R1 pointer comparison",
  1401. .result = REJECT,
  1402. },
  1403. {
  1404. "access skb fields bad4",
  1405. .insns = {
  1406. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
  1407. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  1408. offsetof(struct __sk_buff, len)),
  1409. BPF_MOV64_IMM(BPF_REG_0, 0),
  1410. BPF_EXIT_INSN(),
  1411. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  1412. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  1413. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  1414. BPF_LD_MAP_FD(BPF_REG_1, 0),
  1415. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  1416. BPF_FUNC_map_lookup_elem),
  1417. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  1418. BPF_EXIT_INSN(),
  1419. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  1420. BPF_JMP_IMM(BPF_JA, 0, 0, -13),
  1421. },
  1422. .fixup_map_hash_8b = { 7 },
  1423. .errstr = "different pointers",
  1424. .errstr_unpriv = "R1 pointer comparison",
  1425. .result = REJECT,
  1426. },
  /* The socket-layer __sk_buff fields (family, remote/local IPv4+IPv6
   * addresses, remote_port) are only exposed to SK_SKB programs; with
   * the default socket-filter prog type each read must be rejected as
   * an invalid bpf_context access.  One negative test per field.
   */
  1427. {
  1428. "invalid access __sk_buff family",
  1429. .insns = {
  1430. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1431. offsetof(struct __sk_buff, family)),
  1432. BPF_EXIT_INSN(),
  1433. },
  1434. .errstr = "invalid bpf_context access",
  1435. .result = REJECT,
  1436. },
  1437. {
  1438. "invalid access __sk_buff remote_ip4",
  1439. .insns = {
  1440. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1441. offsetof(struct __sk_buff, remote_ip4)),
  1442. BPF_EXIT_INSN(),
  1443. },
  1444. .errstr = "invalid bpf_context access",
  1445. .result = REJECT,
  1446. },
  1447. {
  1448. "invalid access __sk_buff local_ip4",
  1449. .insns = {
  1450. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1451. offsetof(struct __sk_buff, local_ip4)),
  1452. BPF_EXIT_INSN(),
  1453. },
  1454. .errstr = "invalid bpf_context access",
  1455. .result = REJECT,
  1456. },
  1457. {
  1458. "invalid access __sk_buff remote_ip6",
  1459. .insns = {
  1460. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1461. offsetof(struct __sk_buff, remote_ip6)),
  1462. BPF_EXIT_INSN(),
  1463. },
  1464. .errstr = "invalid bpf_context access",
  1465. .result = REJECT,
  1466. },
  1467. {
  1468. "invalid access __sk_buff local_ip6",
  1469. .insns = {
  1470. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1471. offsetof(struct __sk_buff, local_ip6)),
  1472. BPF_EXIT_INSN(),
  1473. },
  1474. .errstr = "invalid bpf_context access",
  1475. .result = REJECT,
  1476. },
  1477. {
  1478. "invalid access __sk_buff remote_port",
  1479. .insns = {
  1480. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1481. offsetof(struct __sk_buff, remote_port)),
  1482. BPF_EXIT_INSN(),
  1483. },
  1484. .errstr = "invalid bpf_context access",
  1485. .result = REJECT,
  1486. },
  /* Socket filters may not read __sk_buff->local_port either.  Fix: the
   * test name duplicated the preceding "remote_port" entry even though
   * the load below reads local_port; renamed so failures are uniquely
   * attributable (and -t name filtering can select it).
   */
  1487. {
  1488. "invalid access __sk_buff local_port",
  1489. .insns = {
  1490. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1491. offsetof(struct __sk_buff, local_port)),
  1492. BPF_EXIT_INSN(),
  1493. },
  1494. .errstr = "invalid bpf_context access",
  1495. .result = REJECT,
  1496. },
  /* Positive counterparts of the tests above: with
   * BPF_PROG_TYPE_SK_SKB the socket-layer fields are readable.  The
   * IPv6 addresses are read one 32-bit word at a time, covering all
   * four array slots.
   */
  1497. {
  1498. "valid access __sk_buff family",
  1499. .insns = {
  1500. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1501. offsetof(struct __sk_buff, family)),
  1502. BPF_EXIT_INSN(),
  1503. },
  1504. .result = ACCEPT,
  1505. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1506. },
  1507. {
  1508. "valid access __sk_buff remote_ip4",
  1509. .insns = {
  1510. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1511. offsetof(struct __sk_buff, remote_ip4)),
  1512. BPF_EXIT_INSN(),
  1513. },
  1514. .result = ACCEPT,
  1515. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1516. },
  1517. {
  1518. "valid access __sk_buff local_ip4",
  1519. .insns = {
  1520. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1521. offsetof(struct __sk_buff, local_ip4)),
  1522. BPF_EXIT_INSN(),
  1523. },
  1524. .result = ACCEPT,
  1525. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1526. },
  1527. {
  1528. "valid access __sk_buff remote_ip6",
  1529. .insns = {
  1530. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1531. offsetof(struct __sk_buff, remote_ip6[0])),
  1532. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1533. offsetof(struct __sk_buff, remote_ip6[1])),
  1534. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1535. offsetof(struct __sk_buff, remote_ip6[2])),
  1536. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1537. offsetof(struct __sk_buff, remote_ip6[3])),
  1538. BPF_EXIT_INSN(),
  1539. },
  1540. .result = ACCEPT,
  1541. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1542. },
  1543. {
  1544. "valid access __sk_buff local_ip6",
  1545. .insns = {
  1546. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1547. offsetof(struct __sk_buff, local_ip6[0])),
  1548. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1549. offsetof(struct __sk_buff, local_ip6[1])),
  1550. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1551. offsetof(struct __sk_buff, local_ip6[2])),
  1552. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1553. offsetof(struct __sk_buff, local_ip6[3])),
  1554. BPF_EXIT_INSN(),
  1555. },
  1556. .result = ACCEPT,
  1557. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1558. },
  1559. {
  1560. "valid access __sk_buff remote_port",
  1561. .insns = {
  1562. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1563. offsetof(struct __sk_buff, remote_port)),
  1564. BPF_EXIT_INSN(),
  1565. },
  1566. .result = ACCEPT,
  1567. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1568. },
  /* SK_SKB programs may read __sk_buff->local_port.  Fix: the test name
   * duplicated the preceding "remote_port" entry although the load below
   * reads local_port; renamed so the test is uniquely identifiable.
   */
  1569. {
  1570. "valid access __sk_buff local_port",
  1571. .insns = {
  1572. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1573. offsetof(struct __sk_buff, local_port)),
  1574. BPF_EXIT_INSN(),
  1575. },
  1576. .result = ACCEPT,
  1577. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1578. },
  /* SK_SKB field permission matrix: tc_classid and mark are not even
   * readable; mark is not writable; tc_index and priority are writable.
   */
  1579. {
  1580. "invalid access of tc_classid for SK_SKB",
  1581. .insns = {
  1582. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1583. offsetof(struct __sk_buff, tc_classid)),
  1584. BPF_EXIT_INSN(),
  1585. },
  1586. .result = REJECT,
  1587. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1588. .errstr = "invalid bpf_context access",
  1589. },
  1590. {
  1591. "invalid access of skb->mark for SK_SKB",
  1592. .insns = {
  1593. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1594. offsetof(struct __sk_buff, mark)),
  1595. BPF_EXIT_INSN(),
  1596. },
  1597. .result = REJECT,
  1598. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1599. .errstr = "invalid bpf_context access",
  1600. },
  1601. {
  1602. "check skb->mark is not writeable by SK_SKB",
  1603. .insns = {
  1604. BPF_MOV64_IMM(BPF_REG_0, 0),
  1605. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  1606. offsetof(struct __sk_buff, mark)),
  1607. BPF_EXIT_INSN(),
  1608. },
  1609. .result = REJECT,
  1610. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1611. .errstr = "invalid bpf_context access",
  1612. },
  1613. {
  1614. "check skb->tc_index is writeable by SK_SKB",
  1615. .insns = {
  1616. BPF_MOV64_IMM(BPF_REG_0, 0),
  1617. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  1618. offsetof(struct __sk_buff, tc_index)),
  1619. BPF_EXIT_INSN(),
  1620. },
  1621. .result = ACCEPT,
  1622. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1623. },
  1624. {
  1625. "check skb->priority is writeable by SK_SKB",
  1626. .insns = {
  1627. BPF_MOV64_IMM(BPF_REG_0, 0),
  1628. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  1629. offsetof(struct __sk_buff, priority)),
  1630. BPF_EXIT_INSN(),
  1631. },
  1632. .result = ACCEPT,
  1633. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1634. },
  /* SK_SKB direct packet access: the canonical pattern - load data and
   * data_end (32-bit __sk_buff slots), bounds-check data+8 <= data_end,
   * then read or write inside the verified range.  The third test adds
   * a second, narrower check (data+6) to exercise overlapping-range
   * tracking.
   */
  1635. {
  1636. "direct packet read for SK_SKB",
  1637. .insns = {
  1638. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1639. offsetof(struct __sk_buff, data)),
  1640. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1641. offsetof(struct __sk_buff, data_end)),
  1642. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  1643. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  1644. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  1645. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  1646. BPF_MOV64_IMM(BPF_REG_0, 0),
  1647. BPF_EXIT_INSN(),
  1648. },
  1649. .result = ACCEPT,
  1650. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1651. },
  1652. {
  1653. "direct packet write for SK_SKB",
  1654. .insns = {
  1655. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1656. offsetof(struct __sk_buff, data)),
  1657. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1658. offsetof(struct __sk_buff, data_end)),
  1659. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  1660. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  1661. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  1662. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  1663. BPF_MOV64_IMM(BPF_REG_0, 0),
  1664. BPF_EXIT_INSN(),
  1665. },
  1666. .result = ACCEPT,
  1667. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1668. },
  1669. {
  1670. "overlapping checks for direct packet access SK_SKB",
  1671. .insns = {
  1672. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1673. offsetof(struct __sk_buff, data)),
  1674. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  1675. offsetof(struct __sk_buff, data_end)),
  1676. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  1677. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  1678. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
  1679. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  1680. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
  1681. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
  1682. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
  1683. BPF_MOV64_IMM(BPF_REG_0, 0),
  1684. BPF_EXIT_INSN(),
  1685. },
  1686. .result = ACCEPT,
  1687. .prog_type = BPF_PROG_TYPE_SK_SKB,
  1688. },
  /* SK_MSG positive tests: 32-bit reads of the sk_msg_md socket fields
   * (family, remote/local IPv4, remote/local port) are permitted for
   * BPF_PROG_TYPE_SK_MSG programs.
   */
  1689. {
  1690. "valid access family in SK_MSG",
  1691. .insns = {
  1692. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1693. offsetof(struct sk_msg_md, family)),
  1694. BPF_EXIT_INSN(),
  1695. },
  1696. .result = ACCEPT,
  1697. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1698. },
  1699. {
  1700. "valid access remote_ip4 in SK_MSG",
  1701. .insns = {
  1702. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1703. offsetof(struct sk_msg_md, remote_ip4)),
  1704. BPF_EXIT_INSN(),
  1705. },
  1706. .result = ACCEPT,
  1707. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1708. },
  1709. {
  1710. "valid access local_ip4 in SK_MSG",
  1711. .insns = {
  1712. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1713. offsetof(struct sk_msg_md, local_ip4)),
  1714. BPF_EXIT_INSN(),
  1715. },
  1716. .result = ACCEPT,
  1717. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1718. },
  1719. {
  1720. "valid access remote_port in SK_MSG",
  1721. .insns = {
  1722. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1723. offsetof(struct sk_msg_md, remote_port)),
  1724. BPF_EXIT_INSN(),
  1725. },
  1726. .result = ACCEPT,
  1727. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1728. },
  1729. {
  1730. "valid access local_port in SK_MSG",
  1731. .insns = {
  1732. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1733. offsetof(struct sk_msg_md, local_port)),
  1734. BPF_EXIT_INSN(),
  1735. },
  1736. .result = ACCEPT,
  1737. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1738. },
  /* SK_MSG programs may read all four 32-bit words of remote_ip6.
   * Fix: .prog_type was BPF_PROG_TYPE_SK_SKB, so the struct sk_msg_md
   * offsets were validated against __sk_buff and the test did not
   * exercise SK_MSG context access at all; load as SK_MSG like the
   * neighbouring sk_msg_md tests.
   */
  1739. {
  1740. "valid access remote_ip6 in SK_MSG",
  1741. .insns = {
  1742. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1743. offsetof(struct sk_msg_md, remote_ip6[0])),
  1744. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1745. offsetof(struct sk_msg_md, remote_ip6[1])),
  1746. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1747. offsetof(struct sk_msg_md, remote_ip6[2])),
  1748. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1749. offsetof(struct sk_msg_md, remote_ip6[3])),
  1750. BPF_EXIT_INSN(),
  1751. },
  1752. .result = ACCEPT,
  1753. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1754. },
  /* SK_MSG programs may read all four 32-bit words of local_ip6.
   * Fix: same prog_type bug as the remote_ip6 test - SK_SKB made the
   * sk_msg_md offsets resolve against __sk_buff; use SK_MSG.
   */
  1755. {
  1756. "valid access local_ip6 in SK_MSG",
  1757. .insns = {
  1758. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1759. offsetof(struct sk_msg_md, local_ip6[0])),
  1760. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1761. offsetof(struct sk_msg_md, local_ip6[1])),
  1762. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1763. offsetof(struct sk_msg_md, local_ip6[2])),
  1764. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  1765. offsetof(struct sk_msg_md, local_ip6[3])),
  1766. BPF_EXIT_INSN(),
  1767. },
  1768. .result = ACCEPT,
  1769. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1770. },
  /* SK_MSG negative tests: a 64-bit read of the 32-bit family field, a
   * read past the end of struct sk_msg_md, and a misaligned (+1) read
   * of family must all be rejected.
   */
  1771. {
  1772. "invalid 64B read of family in SK_MSG",
  1773. .insns = {
  1774. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
  1775. offsetof(struct sk_msg_md, family)),
  1776. BPF_EXIT_INSN(),
  1777. },
  1778. .errstr = "invalid bpf_context access",
  1779. .result = REJECT,
  1780. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1781. },
  1782. {
  1783. "invalid read past end of SK_MSG",
  1784. .insns = {
  1785. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1786. offsetof(struct sk_msg_md, local_port) + 4),
  1787. BPF_EXIT_INSN(),
  1788. },
  1789. .errstr = "R0 !read_ok",
  1790. .result = REJECT,
  1791. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1792. },
  1793. {
  1794. "invalid read offset in SK_MSG",
  1795. .insns = {
  1796. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  1797. offsetof(struct sk_msg_md, family) + 1),
  1798. BPF_EXIT_INSN(),
  1799. },
  1800. .errstr = "invalid bpf_context access",
  1801. .result = REJECT,
  1802. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1803. },
  /* SK_MSG direct packet access: same bounds-check pattern as the
   * SK_SKB variants above, except data/data_end are loaded as 64-bit
   * (BPF_DW) slots of sk_msg_md.
   */
  1804. {
  1805. "direct packet read for SK_MSG",
  1806. .insns = {
  1807. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
  1808. offsetof(struct sk_msg_md, data)),
  1809. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
  1810. offsetof(struct sk_msg_md, data_end)),
  1811. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  1812. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  1813. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  1814. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  1815. BPF_MOV64_IMM(BPF_REG_0, 0),
  1816. BPF_EXIT_INSN(),
  1817. },
  1818. .result = ACCEPT,
  1819. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1820. },
  1821. {
  1822. "direct packet write for SK_MSG",
  1823. .insns = {
  1824. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
  1825. offsetof(struct sk_msg_md, data)),
  1826. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
  1827. offsetof(struct sk_msg_md, data_end)),
  1828. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  1829. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  1830. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  1831. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  1832. BPF_MOV64_IMM(BPF_REG_0, 0),
  1833. BPF_EXIT_INSN(),
  1834. },
  1835. .result = ACCEPT,
  1836. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1837. },
  1838. {
  1839. "overlapping checks for direct packet access SK_MSG",
  1840. .insns = {
  1841. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
  1842. offsetof(struct sk_msg_md, data)),
  1843. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
  1844. offsetof(struct sk_msg_md, data_end)),
  1845. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  1846. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  1847. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
  1848. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  1849. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
  1850. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
  1851. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
  1852. BPF_MOV64_IMM(BPF_REG_0, 0),
  1853. BPF_EXIT_INSN(),
  1854. },
  1855. .result = ACCEPT,
  1856. .prog_type = BPF_PROG_TYPE_SK_MSG,
  1857. },
  /* Default socket filters may not store to skb->mark or skb->tc_index;
   * storing R1 (a pointer) also makes the unprivileged check fire with
   * "R1 leaks addr".
   */
  1858. {
  1859. "check skb->mark is not writeable by sockets",
  1860. .insns = {
  1861. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  1862. offsetof(struct __sk_buff, mark)),
  1863. BPF_EXIT_INSN(),
  1864. },
  1865. .errstr = "invalid bpf_context access",
  1866. .errstr_unpriv = "R1 leaks addr",
  1867. .result = REJECT,
  1868. },
  1869. {
  1870. "check skb->tc_index is not writeable by sockets",
  1871. .insns = {
  1872. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  1873. offsetof(struct __sk_buff, tc_index)),
  1874. BPF_EXIT_INSN(),
  1875. },
  1876. .errstr = "invalid bpf_context access",
  1877. .errstr_unpriv = "R1 leaks addr",
  1878. .result = REJECT,
  1879. },
  /* "check cb access: byte": byte-granular stores then loads covering
   * every byte of all five 32-bit cb[] words (20 bytes total) - narrow
   * access to cb[] is fully permitted for socket filters.
   */
  1880. {
  1881. "check cb access: byte",
  1882. .insns = {
  1883. BPF_MOV64_IMM(BPF_REG_0, 0),
  1884. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1885. offsetof(struct __sk_buff, cb[0])),
  1886. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1887. offsetof(struct __sk_buff, cb[0]) + 1),
  1888. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1889. offsetof(struct __sk_buff, cb[0]) + 2),
  1890. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1891. offsetof(struct __sk_buff, cb[0]) + 3),
  1892. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1893. offsetof(struct __sk_buff, cb[1])),
  1894. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1895. offsetof(struct __sk_buff, cb[1]) + 1),
  1896. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1897. offsetof(struct __sk_buff, cb[1]) + 2),
  1898. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1899. offsetof(struct __sk_buff, cb[1]) + 3),
  1900. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1901. offsetof(struct __sk_buff, cb[2])),
  1902. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1903. offsetof(struct __sk_buff, cb[2]) + 1),
  1904. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1905. offsetof(struct __sk_buff, cb[2]) + 2),
  1906. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1907. offsetof(struct __sk_buff, cb[2]) + 3),
  1908. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1909. offsetof(struct __sk_buff, cb[3])),
  1910. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1911. offsetof(struct __sk_buff, cb[3]) + 1),
  1912. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1913. offsetof(struct __sk_buff, cb[3]) + 2),
  1914. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1915. offsetof(struct __sk_buff, cb[3]) + 3),
  1916. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1917. offsetof(struct __sk_buff, cb[4])),
  1918. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1919. offsetof(struct __sk_buff, cb[4]) + 1),
  1920. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1921. offsetof(struct __sk_buff, cb[4]) + 2),
  1922. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1923. offsetof(struct __sk_buff, cb[4]) + 3),
  1924. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1925. offsetof(struct __sk_buff, cb[0])),
  1926. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1927. offsetof(struct __sk_buff, cb[0]) + 1),
  1928. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1929. offsetof(struct __sk_buff, cb[0]) + 2),
  1930. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1931. offsetof(struct __sk_buff, cb[0]) + 3),
  1932. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1933. offsetof(struct __sk_buff, cb[1])),
  1934. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1935. offsetof(struct __sk_buff, cb[1]) + 1),
  1936. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1937. offsetof(struct __sk_buff, cb[1]) + 2),
  1938. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1939. offsetof(struct __sk_buff, cb[1]) + 3),
  1940. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1941. offsetof(struct __sk_buff, cb[2])),
  1942. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1943. offsetof(struct __sk_buff, cb[2]) + 1),
  1944. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1945. offsetof(struct __sk_buff, cb[2]) + 2),
  1946. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1947. offsetof(struct __sk_buff, cb[2]) + 3),
  1948. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1949. offsetof(struct __sk_buff, cb[3])),
  1950. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1951. offsetof(struct __sk_buff, cb[3]) + 1),
  1952. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1953. offsetof(struct __sk_buff, cb[3]) + 2),
  1954. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1955. offsetof(struct __sk_buff, cb[3]) + 3),
  1956. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1957. offsetof(struct __sk_buff, cb[4])),
  1958. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1959. offsetof(struct __sk_buff, cb[4]) + 1),
  1960. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1961. offsetof(struct __sk_buff, cb[4]) + 2),
  1962. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1963. offsetof(struct __sk_buff, cb[4]) + 3),
  1964. BPF_EXIT_INSN(),
  1965. },
  1966. .result = ACCEPT,
  1967. },
  /* skb->hash narrow-access rules: byte stores anywhere in hash (and in
   * tc_index's padding at +3) are forbidden; a byte *load* of hash is
   * permitted only for the least-significant byte, whose offset is
   * endian-dependent (offset 0 on little-endian, +3 on big-endian) -
   * hence the #if __BYTE_ORDER selection.  Loads of the other bytes are
   * rejected.
   */
  1968. {
  1969. "__sk_buff->hash, offset 0, byte store not permitted",
  1970. .insns = {
  1971. BPF_MOV64_IMM(BPF_REG_0, 0),
  1972. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1973. offsetof(struct __sk_buff, hash)),
  1974. BPF_EXIT_INSN(),
  1975. },
  1976. .errstr = "invalid bpf_context access",
  1977. .result = REJECT,
  1978. },
  1979. {
  1980. "__sk_buff->tc_index, offset 3, byte store not permitted",
  1981. .insns = {
  1982. BPF_MOV64_IMM(BPF_REG_0, 0),
  1983. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  1984. offsetof(struct __sk_buff, tc_index) + 3),
  1985. BPF_EXIT_INSN(),
  1986. },
  1987. .errstr = "invalid bpf_context access",
  1988. .result = REJECT,
  1989. },
  1990. {
  1991. "check skb->hash byte load permitted",
  1992. .insns = {
  1993. BPF_MOV64_IMM(BPF_REG_0, 0),
  1994. #if __BYTE_ORDER == __LITTLE_ENDIAN
  1995. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1996. offsetof(struct __sk_buff, hash)),
  1997. #else
  1998. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  1999. offsetof(struct __sk_buff, hash) + 3),
  2000. #endif
  2001. BPF_EXIT_INSN(),
  2002. },
  2003. .result = ACCEPT,
  2004. },
  2005. {
  2006. "check skb->hash byte load not permitted 1",
  2007. .insns = {
  2008. BPF_MOV64_IMM(BPF_REG_0, 0),
  2009. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  2010. offsetof(struct __sk_buff, hash) + 1),
  2011. BPF_EXIT_INSN(),
  2012. },
  2013. .errstr = "invalid bpf_context access",
  2014. .result = REJECT,
  2015. },
  2016. {
  2017. "check skb->hash byte load not permitted 2",
  2018. .insns = {
  2019. BPF_MOV64_IMM(BPF_REG_0, 0),
  2020. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  2021. offsetof(struct __sk_buff, hash) + 2),
  2022. BPF_EXIT_INSN(),
  2023. },
  2024. .errstr = "invalid bpf_context access",
  2025. .result = REJECT,
  2026. },
  2027. {
  2028. "check skb->hash byte load not permitted 3",
  2029. .insns = {
  2030. BPF_MOV64_IMM(BPF_REG_0, 0),
  2031. #if __BYTE_ORDER == __LITTLE_ENDIAN
  2032. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  2033. offsetof(struct __sk_buff, hash) + 3),
  2034. #else
  2035. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  2036. offsetof(struct __sk_buff, hash)),
  2037. #endif
  2038. BPF_EXIT_INSN(),
  2039. },
  2040. .errstr = "invalid bpf_context access",
  2041. .result = REJECT,
  2042. },
  /* cb[] is not part of the CGROUP_SOCK context, so even a byte store
   * is rejected for that prog type; the following "half" test mirrors
   * the byte test above with 16-bit accesses at both halves of every
   * cb[] word, all permitted for socket filters.
   */
  2043. {
  2044. "check cb access: byte, wrong type",
  2045. .insns = {
  2046. BPF_MOV64_IMM(BPF_REG_0, 0),
  2047. BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
  2048. offsetof(struct __sk_buff, cb[0])),
  2049. BPF_EXIT_INSN(),
  2050. },
  2051. .errstr = "invalid bpf_context access",
  2052. .result = REJECT,
  2053. .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
  2054. },
  2055. {
  2056. "check cb access: half",
  2057. .insns = {
  2058. BPF_MOV64_IMM(BPF_REG_0, 0),
  2059. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2060. offsetof(struct __sk_buff, cb[0])),
  2061. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2062. offsetof(struct __sk_buff, cb[0]) + 2),
  2063. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2064. offsetof(struct __sk_buff, cb[1])),
  2065. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2066. offsetof(struct __sk_buff, cb[1]) + 2),
  2067. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2068. offsetof(struct __sk_buff, cb[2])),
  2069. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2070. offsetof(struct __sk_buff, cb[2]) + 2),
  2071. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2072. offsetof(struct __sk_buff, cb[3])),
  2073. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2074. offsetof(struct __sk_buff, cb[3]) + 2),
  2075. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2076. offsetof(struct __sk_buff, cb[4])),
  2077. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2078. offsetof(struct __sk_buff, cb[4]) + 2),
  2079. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2080. offsetof(struct __sk_buff, cb[0])),
  2081. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2082. offsetof(struct __sk_buff, cb[0]) + 2),
  2083. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2084. offsetof(struct __sk_buff, cb[1])),
  2085. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2086. offsetof(struct __sk_buff, cb[1]) + 2),
  2087. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2088. offsetof(struct __sk_buff, cb[2])),
  2089. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2090. offsetof(struct __sk_buff, cb[2]) + 2),
  2091. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2092. offsetof(struct __sk_buff, cb[3])),
  2093. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2094. offsetof(struct __sk_buff, cb[3]) + 2),
  2095. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2096. offsetof(struct __sk_buff, cb[4])),
  2097. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2098. offsetof(struct __sk_buff, cb[4]) + 2),
  2099. BPF_EXIT_INSN(),
  2100. },
  2101. .result = ACCEPT,
  2102. },
  2103. {
  2104. "check cb access: half, unaligned",
  2105. .insns = {
  2106. BPF_MOV64_IMM(BPF_REG_0, 0),
  2107. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2108. offsetof(struct __sk_buff, cb[0]) + 1),
  2109. BPF_EXIT_INSN(),
  2110. },
  2111. .errstr = "misaligned context access",
  2112. .result = REJECT,
  2113. .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
  2114. },
  2115. {
  2116. "check __sk_buff->hash, offset 0, half store not permitted",
  2117. .insns = {
  2118. BPF_MOV64_IMM(BPF_REG_0, 0),
  2119. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2120. offsetof(struct __sk_buff, hash)),
  2121. BPF_EXIT_INSN(),
  2122. },
  2123. .errstr = "invalid bpf_context access",
  2124. .result = REJECT,
  2125. },
  2126. {
  2127. "check __sk_buff->tc_index, offset 2, half store not permitted",
  2128. .insns = {
  2129. BPF_MOV64_IMM(BPF_REG_0, 0),
  2130. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2131. offsetof(struct __sk_buff, tc_index) + 2),
  2132. BPF_EXIT_INSN(),
  2133. },
  2134. .errstr = "invalid bpf_context access",
  2135. .result = REJECT,
  2136. },
  2137. {
  2138. "check skb->hash half load permitted",
  2139. .insns = {
  2140. BPF_MOV64_IMM(BPF_REG_0, 0),
  2141. #if __BYTE_ORDER == __LITTLE_ENDIAN
  2142. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2143. offsetof(struct __sk_buff, hash)),
  2144. #else
  2145. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2146. offsetof(struct __sk_buff, hash) + 2),
  2147. #endif
  2148. BPF_EXIT_INSN(),
  2149. },
  2150. .result = ACCEPT,
  2151. },
  2152. {
  2153. "check skb->hash half load not permitted",
  2154. .insns = {
  2155. BPF_MOV64_IMM(BPF_REG_0, 0),
  2156. #if __BYTE_ORDER == __LITTLE_ENDIAN
  2157. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2158. offsetof(struct __sk_buff, hash) + 2),
  2159. #else
  2160. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  2161. offsetof(struct __sk_buff, hash)),
  2162. #endif
  2163. BPF_EXIT_INSN(),
  2164. },
  2165. .errstr = "invalid bpf_context access",
  2166. .result = REJECT,
  2167. },
  2168. {
  2169. "check cb access: half, wrong type",
  2170. .insns = {
  2171. BPF_MOV64_IMM(BPF_REG_0, 0),
  2172. BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
  2173. offsetof(struct __sk_buff, cb[0])),
  2174. BPF_EXIT_INSN(),
  2175. },
  2176. .errstr = "invalid bpf_context access",
  2177. .result = REJECT,
  2178. .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
  2179. },
  2180. {
  2181. "check cb access: word",
  2182. .insns = {
  2183. BPF_MOV64_IMM(BPF_REG_0, 0),
  2184. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2185. offsetof(struct __sk_buff, cb[0])),
  2186. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2187. offsetof(struct __sk_buff, cb[1])),
  2188. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2189. offsetof(struct __sk_buff, cb[2])),
  2190. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2191. offsetof(struct __sk_buff, cb[3])),
  2192. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2193. offsetof(struct __sk_buff, cb[4])),
  2194. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2195. offsetof(struct __sk_buff, cb[0])),
  2196. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2197. offsetof(struct __sk_buff, cb[1])),
  2198. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2199. offsetof(struct __sk_buff, cb[2])),
  2200. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2201. offsetof(struct __sk_buff, cb[3])),
  2202. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2203. offsetof(struct __sk_buff, cb[4])),
  2204. BPF_EXIT_INSN(),
  2205. },
  2206. .result = ACCEPT,
  2207. },
  2208. {
  2209. "check cb access: word, unaligned 1",
  2210. .insns = {
  2211. BPF_MOV64_IMM(BPF_REG_0, 0),
  2212. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2213. offsetof(struct __sk_buff, cb[0]) + 2),
  2214. BPF_EXIT_INSN(),
  2215. },
  2216. .errstr = "misaligned context access",
  2217. .result = REJECT,
  2218. .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
  2219. },
  2220. {
  2221. "check cb access: word, unaligned 2",
  2222. .insns = {
  2223. BPF_MOV64_IMM(BPF_REG_0, 0),
  2224. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2225. offsetof(struct __sk_buff, cb[4]) + 1),
  2226. BPF_EXIT_INSN(),
  2227. },
  2228. .errstr = "misaligned context access",
  2229. .result = REJECT,
  2230. .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
  2231. },
  2232. {
  2233. "check cb access: word, unaligned 3",
  2234. .insns = {
  2235. BPF_MOV64_IMM(BPF_REG_0, 0),
  2236. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2237. offsetof(struct __sk_buff, cb[4]) + 2),
  2238. BPF_EXIT_INSN(),
  2239. },
  2240. .errstr = "misaligned context access",
  2241. .result = REJECT,
  2242. .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
  2243. },
  2244. {
  2245. "check cb access: word, unaligned 4",
  2246. .insns = {
  2247. BPF_MOV64_IMM(BPF_REG_0, 0),
  2248. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2249. offsetof(struct __sk_buff, cb[4]) + 3),
  2250. BPF_EXIT_INSN(),
  2251. },
  2252. .errstr = "misaligned context access",
  2253. .result = REJECT,
  2254. .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
  2255. },
  2256. {
  2257. "check cb access: double",
  2258. .insns = {
  2259. BPF_MOV64_IMM(BPF_REG_0, 0),
  2260. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
  2261. offsetof(struct __sk_buff, cb[0])),
  2262. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
  2263. offsetof(struct __sk_buff, cb[2])),
  2264. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
  2265. offsetof(struct __sk_buff, cb[0])),
  2266. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
  2267. offsetof(struct __sk_buff, cb[2])),
  2268. BPF_EXIT_INSN(),
  2269. },
  2270. .result = ACCEPT,
  2271. },
  2272. {
  2273. "check cb access: double, unaligned 1",
  2274. .insns = {
  2275. BPF_MOV64_IMM(BPF_REG_0, 0),
  2276. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
  2277. offsetof(struct __sk_buff, cb[1])),
  2278. BPF_EXIT_INSN(),
  2279. },
  2280. .errstr = "misaligned context access",
  2281. .result = REJECT,
  2282. .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
  2283. },
  2284. {
  2285. "check cb access: double, unaligned 2",
  2286. .insns = {
  2287. BPF_MOV64_IMM(BPF_REG_0, 0),
  2288. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
  2289. offsetof(struct __sk_buff, cb[3])),
  2290. BPF_EXIT_INSN(),
  2291. },
  2292. .errstr = "misaligned context access",
  2293. .result = REJECT,
  2294. .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
  2295. },
  2296. {
  2297. "check cb access: double, oob 1",
  2298. .insns = {
  2299. BPF_MOV64_IMM(BPF_REG_0, 0),
  2300. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
  2301. offsetof(struct __sk_buff, cb[4])),
  2302. BPF_EXIT_INSN(),
  2303. },
  2304. .errstr = "invalid bpf_context access",
  2305. .result = REJECT,
  2306. },
  2307. {
  2308. "check cb access: double, oob 2",
  2309. .insns = {
  2310. BPF_MOV64_IMM(BPF_REG_0, 0),
  2311. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
  2312. offsetof(struct __sk_buff, cb[4])),
  2313. BPF_EXIT_INSN(),
  2314. },
  2315. .errstr = "invalid bpf_context access",
  2316. .result = REJECT,
  2317. },
  2318. {
  2319. "check __sk_buff->ifindex dw store not permitted",
  2320. .insns = {
  2321. BPF_MOV64_IMM(BPF_REG_0, 0),
  2322. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
  2323. offsetof(struct __sk_buff, ifindex)),
  2324. BPF_EXIT_INSN(),
  2325. },
  2326. .errstr = "invalid bpf_context access",
  2327. .result = REJECT,
  2328. },
  2329. {
  2330. "check __sk_buff->ifindex dw load not permitted",
  2331. .insns = {
  2332. BPF_MOV64_IMM(BPF_REG_0, 0),
  2333. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
  2334. offsetof(struct __sk_buff, ifindex)),
  2335. BPF_EXIT_INSN(),
  2336. },
  2337. .errstr = "invalid bpf_context access",
  2338. .result = REJECT,
  2339. },
  2340. {
  2341. "check cb access: double, wrong type",
  2342. .insns = {
  2343. BPF_MOV64_IMM(BPF_REG_0, 0),
  2344. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
  2345. offsetof(struct __sk_buff, cb[0])),
  2346. BPF_EXIT_INSN(),
  2347. },
  2348. .errstr = "invalid bpf_context access",
  2349. .result = REJECT,
  2350. .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
  2351. },
  2352. {
  2353. "check out of range skb->cb access",
  2354. .insns = {
  2355. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2356. offsetof(struct __sk_buff, cb[0]) + 256),
  2357. BPF_EXIT_INSN(),
  2358. },
  2359. .errstr = "invalid bpf_context access",
  2360. .errstr_unpriv = "",
  2361. .result = REJECT,
  2362. .prog_type = BPF_PROG_TYPE_SCHED_ACT,
  2363. },
  2364. {
  2365. "write skb fields from socket prog",
  2366. .insns = {
  2367. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2368. offsetof(struct __sk_buff, cb[4])),
  2369. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  2370. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2371. offsetof(struct __sk_buff, mark)),
  2372. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2373. offsetof(struct __sk_buff, tc_index)),
  2374. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  2375. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  2376. offsetof(struct __sk_buff, cb[0])),
  2377. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  2378. offsetof(struct __sk_buff, cb[2])),
  2379. BPF_EXIT_INSN(),
  2380. },
  2381. .result = ACCEPT,
  2382. .errstr_unpriv = "R1 leaks addr",
  2383. .result_unpriv = REJECT,
  2384. },
  2385. {
  2386. "write skb fields from tc_cls_act prog",
  2387. .insns = {
  2388. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2389. offsetof(struct __sk_buff, cb[0])),
  2390. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2391. offsetof(struct __sk_buff, mark)),
  2392. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  2393. offsetof(struct __sk_buff, tc_index)),
  2394. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2395. offsetof(struct __sk_buff, tc_index)),
  2396. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
  2397. offsetof(struct __sk_buff, cb[3])),
  2398. BPF_EXIT_INSN(),
  2399. },
  2400. .errstr_unpriv = "",
  2401. .result_unpriv = REJECT,
  2402. .result = ACCEPT,
  2403. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2404. },
  2405. {
  2406. "PTR_TO_STACK store/load",
  2407. .insns = {
  2408. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  2409. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
  2410. BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
  2411. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
  2412. BPF_EXIT_INSN(),
  2413. },
  2414. .result = ACCEPT,
  2415. .retval = 0xfaceb00c,
  2416. },
  2417. {
  2418. "PTR_TO_STACK store/load - bad alignment on off",
  2419. .insns = {
  2420. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  2421. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  2422. BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
  2423. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
  2424. BPF_EXIT_INSN(),
  2425. },
  2426. .result = REJECT,
  2427. .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
  2428. },
  2429. {
  2430. "PTR_TO_STACK store/load - bad alignment on reg",
  2431. .insns = {
  2432. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  2433. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
  2434. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  2435. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  2436. BPF_EXIT_INSN(),
  2437. },
  2438. .result = REJECT,
  2439. .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
  2440. },
  2441. {
  2442. "PTR_TO_STACK store/load - out of bounds low",
  2443. .insns = {
  2444. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  2445. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
  2446. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  2447. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  2448. BPF_EXIT_INSN(),
  2449. },
  2450. .result = REJECT,
  2451. .errstr = "invalid stack off=-79992 size=8",
  2452. },
  2453. {
  2454. "PTR_TO_STACK store/load - out of bounds high",
  2455. .insns = {
  2456. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  2457. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  2458. BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
  2459. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
  2460. BPF_EXIT_INSN(),
  2461. },
  2462. .result = REJECT,
  2463. .errstr = "invalid stack off=0 size=8",
  2464. },
  2465. {
  2466. "unpriv: return pointer",
  2467. .insns = {
  2468. BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
  2469. BPF_EXIT_INSN(),
  2470. },
  2471. .result = ACCEPT,
  2472. .result_unpriv = REJECT,
  2473. .errstr_unpriv = "R0 leaks addr",
  2474. .retval = POINTER_VALUE,
  2475. },
  2476. {
  2477. "unpriv: add const to pointer",
  2478. .insns = {
  2479. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  2480. BPF_MOV64_IMM(BPF_REG_0, 0),
  2481. BPF_EXIT_INSN(),
  2482. },
  2483. .result = ACCEPT,
  2484. },
  2485. {
  2486. "unpriv: add pointer to pointer",
  2487. .insns = {
  2488. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
  2489. BPF_MOV64_IMM(BPF_REG_0, 0),
  2490. BPF_EXIT_INSN(),
  2491. },
  2492. .result = REJECT,
  2493. .errstr = "R1 pointer += pointer",
  2494. },
  2495. {
  2496. "unpriv: neg pointer",
  2497. .insns = {
  2498. BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
  2499. BPF_MOV64_IMM(BPF_REG_0, 0),
  2500. BPF_EXIT_INSN(),
  2501. },
  2502. .result = ACCEPT,
  2503. .result_unpriv = REJECT,
  2504. .errstr_unpriv = "R1 pointer arithmetic",
  2505. },
  2506. {
  2507. "unpriv: cmp pointer with const",
  2508. .insns = {
  2509. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
  2510. BPF_MOV64_IMM(BPF_REG_0, 0),
  2511. BPF_EXIT_INSN(),
  2512. },
  2513. .result = ACCEPT,
  2514. .result_unpriv = REJECT,
  2515. .errstr_unpriv = "R1 pointer comparison",
  2516. },
  2517. {
  2518. "unpriv: cmp pointer with pointer",
  2519. .insns = {
  2520. BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  2521. BPF_MOV64_IMM(BPF_REG_0, 0),
  2522. BPF_EXIT_INSN(),
  2523. },
  2524. .result = ACCEPT,
  2525. .result_unpriv = REJECT,
  2526. .errstr_unpriv = "R10 pointer comparison",
  2527. },
  2528. {
  2529. "unpriv: check that printk is disallowed",
  2530. .insns = {
  2531. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2532. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  2533. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  2534. BPF_MOV64_IMM(BPF_REG_2, 8),
  2535. BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
  2536. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2537. BPF_FUNC_trace_printk),
  2538. BPF_MOV64_IMM(BPF_REG_0, 0),
  2539. BPF_EXIT_INSN(),
  2540. },
  2541. .errstr_unpriv = "unknown func bpf_trace_printk#6",
  2542. .result_unpriv = REJECT,
  2543. .result = ACCEPT,
  2544. },
  2545. {
  2546. "unpriv: pass pointer to helper function",
  2547. .insns = {
  2548. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2549. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2550. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2551. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2552. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  2553. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  2554. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2555. BPF_FUNC_map_update_elem),
  2556. BPF_MOV64_IMM(BPF_REG_0, 0),
  2557. BPF_EXIT_INSN(),
  2558. },
  2559. .fixup_map_hash_8b = { 3 },
  2560. .errstr_unpriv = "R4 leaks addr",
  2561. .result_unpriv = REJECT,
  2562. .result = ACCEPT,
  2563. },
  2564. {
  2565. "unpriv: indirectly pass pointer on stack to helper function",
  2566. .insns = {
  2567. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  2568. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2569. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2570. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2571. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2572. BPF_FUNC_map_lookup_elem),
  2573. BPF_MOV64_IMM(BPF_REG_0, 0),
  2574. BPF_EXIT_INSN(),
  2575. },
  2576. .fixup_map_hash_8b = { 3 },
  2577. .errstr = "invalid indirect read from stack off -8+0 size 8",
  2578. .result = REJECT,
  2579. },
  2580. {
  2581. "unpriv: mangle pointer on stack 1",
  2582. .insns = {
  2583. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  2584. BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
  2585. BPF_MOV64_IMM(BPF_REG_0, 0),
  2586. BPF_EXIT_INSN(),
  2587. },
  2588. .errstr_unpriv = "attempt to corrupt spilled",
  2589. .result_unpriv = REJECT,
  2590. .result = ACCEPT,
  2591. },
  2592. {
  2593. "unpriv: mangle pointer on stack 2",
  2594. .insns = {
  2595. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  2596. BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
  2597. BPF_MOV64_IMM(BPF_REG_0, 0),
  2598. BPF_EXIT_INSN(),
  2599. },
  2600. .errstr_unpriv = "attempt to corrupt spilled",
  2601. .result_unpriv = REJECT,
  2602. .result = ACCEPT,
  2603. },
  2604. {
  2605. "unpriv: read pointer from stack in small chunks",
  2606. .insns = {
  2607. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
  2608. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
  2609. BPF_MOV64_IMM(BPF_REG_0, 0),
  2610. BPF_EXIT_INSN(),
  2611. },
  2612. .errstr = "invalid size",
  2613. .result = REJECT,
  2614. },
  2615. {
  2616. "unpriv: write pointer into ctx",
  2617. .insns = {
  2618. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
  2619. BPF_MOV64_IMM(BPF_REG_0, 0),
  2620. BPF_EXIT_INSN(),
  2621. },
  2622. .errstr_unpriv = "R1 leaks addr",
  2623. .result_unpriv = REJECT,
  2624. .errstr = "invalid bpf_context access",
  2625. .result = REJECT,
  2626. },
  2627. {
  2628. "unpriv: spill/fill of ctx",
  2629. .insns = {
  2630. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  2631. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  2632. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  2633. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  2634. BPF_MOV64_IMM(BPF_REG_0, 0),
  2635. BPF_EXIT_INSN(),
  2636. },
  2637. .result = ACCEPT,
  2638. },
  2639. {
  2640. "unpriv: spill/fill of ctx 2",
  2641. .insns = {
  2642. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  2643. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  2644. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  2645. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  2646. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2647. BPF_FUNC_get_hash_recalc),
  2648. BPF_MOV64_IMM(BPF_REG_0, 0),
  2649. BPF_EXIT_INSN(),
  2650. },
  2651. .result = ACCEPT,
  2652. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2653. },
  2654. {
  2655. "unpriv: spill/fill of ctx 3",
  2656. .insns = {
  2657. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  2658. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  2659. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  2660. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
  2661. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  2662. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2663. BPF_FUNC_get_hash_recalc),
  2664. BPF_EXIT_INSN(),
  2665. },
  2666. .result = REJECT,
  2667. .errstr = "R1 type=fp expected=ctx",
  2668. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2669. },
  2670. {
  2671. "unpriv: spill/fill of ctx 4",
  2672. .insns = {
  2673. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  2674. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  2675. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  2676. BPF_MOV64_IMM(BPF_REG_0, 1),
  2677. BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
  2678. BPF_REG_0, -8, 0),
  2679. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  2680. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2681. BPF_FUNC_get_hash_recalc),
  2682. BPF_EXIT_INSN(),
  2683. },
  2684. .result = REJECT,
  2685. .errstr = "R1 type=inv expected=ctx",
  2686. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2687. },
  2688. {
  2689. "unpriv: spill/fill of different pointers stx",
  2690. .insns = {
  2691. BPF_MOV64_IMM(BPF_REG_3, 42),
  2692. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  2693. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  2694. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  2695. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2696. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
  2697. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
  2698. BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
  2699. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  2700. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  2701. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
  2702. offsetof(struct __sk_buff, mark)),
  2703. BPF_MOV64_IMM(BPF_REG_0, 0),
  2704. BPF_EXIT_INSN(),
  2705. },
  2706. .result = REJECT,
  2707. .errstr = "same insn cannot be used with different pointers",
  2708. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2709. },
  2710. {
  2711. "unpriv: spill/fill of different pointers stx - ctx and sock",
  2712. .insns = {
  2713. BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
  2714. /* struct bpf_sock *sock = bpf_sock_lookup(...); */
  2715. BPF_SK_LOOKUP,
  2716. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  2717. /* u64 foo; */
  2718. /* void *target = &foo; */
  2719. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  2720. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  2721. BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
  2722. /* if (skb == NULL) *target = sock; */
  2723. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  2724. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
  2725. /* else *target = skb; */
  2726. BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
  2727. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  2728. /* struct __sk_buff *skb = *target; */
  2729. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  2730. /* skb->mark = 42; */
  2731. BPF_MOV64_IMM(BPF_REG_3, 42),
  2732. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
  2733. offsetof(struct __sk_buff, mark)),
  2734. /* if (sk) bpf_sk_release(sk) */
  2735. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  2736. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  2737. BPF_MOV64_IMM(BPF_REG_0, 0),
  2738. BPF_EXIT_INSN(),
  2739. },
  2740. .result = REJECT,
  2741. .errstr = "type=ctx expected=sock",
  2742. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2743. },
  2744. {
  2745. "unpriv: spill/fill of different pointers stx - leak sock",
  2746. .insns = {
  2747. BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
  2748. /* struct bpf_sock *sock = bpf_sock_lookup(...); */
  2749. BPF_SK_LOOKUP,
  2750. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  2751. /* u64 foo; */
  2752. /* void *target = &foo; */
  2753. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  2754. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  2755. BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
  2756. /* if (skb == NULL) *target = sock; */
  2757. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  2758. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
  2759. /* else *target = skb; */
  2760. BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
  2761. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  2762. /* struct __sk_buff *skb = *target; */
  2763. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  2764. /* skb->mark = 42; */
  2765. BPF_MOV64_IMM(BPF_REG_3, 42),
  2766. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
  2767. offsetof(struct __sk_buff, mark)),
  2768. BPF_EXIT_INSN(),
  2769. },
  2770. .result = REJECT,
  2771. //.errstr = "same insn cannot be used with different pointers",
  2772. .errstr = "Unreleased reference",
  2773. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2774. },
  2775. {
  2776. "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
  2777. .insns = {
  2778. BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
  2779. /* struct bpf_sock *sock = bpf_sock_lookup(...); */
  2780. BPF_SK_LOOKUP,
  2781. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  2782. /* u64 foo; */
  2783. /* void *target = &foo; */
  2784. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  2785. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  2786. BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
  2787. /* if (skb) *target = skb */
  2788. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  2789. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  2790. /* else *target = sock */
  2791. BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
  2792. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
  2793. /* struct bpf_sock *sk = *target; */
  2794. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  2795. /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
  2796. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
  2797. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2798. offsetof(struct bpf_sock, mark)),
  2799. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  2800. BPF_MOV64_IMM(BPF_REG_0, 0),
  2801. BPF_EXIT_INSN(),
  2802. },
  2803. .result = REJECT,
  2804. .errstr = "same insn cannot be used with different pointers",
  2805. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2806. },
  2807. {
  2808. "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
  2809. .insns = {
  2810. BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
  2811. /* struct bpf_sock *sock = bpf_sock_lookup(...); */
  2812. BPF_SK_LOOKUP,
  2813. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  2814. /* u64 foo; */
  2815. /* void *target = &foo; */
  2816. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  2817. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  2818. BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
  2819. /* if (skb) *target = skb */
  2820. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  2821. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  2822. /* else *target = sock */
  2823. BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
  2824. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
  2825. /* struct bpf_sock *sk = *target; */
  2826. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  2827. /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
  2828. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  2829. BPF_MOV64_IMM(BPF_REG_3, 42),
  2830. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
  2831. offsetof(struct bpf_sock, mark)),
  2832. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  2833. BPF_MOV64_IMM(BPF_REG_0, 0),
  2834. BPF_EXIT_INSN(),
  2835. },
  2836. .result = REJECT,
  2837. //.errstr = "same insn cannot be used with different pointers",
  2838. .errstr = "cannot write into socket",
  2839. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2840. },
  2841. {
  2842. "unpriv: spill/fill of different pointers ldx",
  2843. .insns = {
  2844. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  2845. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  2846. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  2847. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2848. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
  2849. -(__s32)offsetof(struct bpf_perf_event_data,
  2850. sample_period) - 8),
  2851. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
  2852. BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
  2853. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  2854. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
  2855. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
  2856. offsetof(struct bpf_perf_event_data,
  2857. sample_period)),
  2858. BPF_MOV64_IMM(BPF_REG_0, 0),
  2859. BPF_EXIT_INSN(),
  2860. },
  2861. .result = REJECT,
  2862. .errstr = "same insn cannot be used with different pointers",
  2863. .prog_type = BPF_PROG_TYPE_PERF_EVENT,
  2864. },
  2865. {
  2866. "unpriv: write pointer into map elem value",
  2867. .insns = {
  2868. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  2869. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2870. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2871. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2872. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2873. BPF_FUNC_map_lookup_elem),
  2874. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  2875. BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
  2876. BPF_EXIT_INSN(),
  2877. },
  2878. .fixup_map_hash_8b = { 3 },
  2879. .errstr_unpriv = "R0 leaks addr",
  2880. .result_unpriv = REJECT,
  2881. .result = ACCEPT,
  2882. },
  2883. {
  2884. "unpriv: partial copy of pointer",
  2885. .insns = {
  2886. BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
  2887. BPF_MOV64_IMM(BPF_REG_0, 0),
  2888. BPF_EXIT_INSN(),
  2889. },
  2890. .errstr_unpriv = "R10 partial copy",
  2891. .result_unpriv = REJECT,
  2892. .result = ACCEPT,
  2893. },
  2894. {
  2895. "unpriv: pass pointer to tail_call",
  2896. .insns = {
  2897. BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
  2898. BPF_LD_MAP_FD(BPF_REG_2, 0),
  2899. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2900. BPF_FUNC_tail_call),
  2901. BPF_MOV64_IMM(BPF_REG_0, 0),
  2902. BPF_EXIT_INSN(),
  2903. },
  2904. .fixup_prog1 = { 1 },
  2905. .errstr_unpriv = "R3 leaks addr into helper",
  2906. .result_unpriv = REJECT,
  2907. .result = ACCEPT,
  2908. },
  2909. {
  2910. "unpriv: cmp map pointer with zero",
  2911. .insns = {
  2912. BPF_MOV64_IMM(BPF_REG_1, 0),
  2913. BPF_LD_MAP_FD(BPF_REG_1, 0),
  2914. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
  2915. BPF_MOV64_IMM(BPF_REG_0, 0),
  2916. BPF_EXIT_INSN(),
  2917. },
  2918. .fixup_map_hash_8b = { 1 },
  2919. .errstr_unpriv = "R1 pointer comparison",
  2920. .result_unpriv = REJECT,
  2921. .result = ACCEPT,
  2922. },
  2923. {
  2924. "unpriv: write into frame pointer",
  2925. .insns = {
  2926. BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
  2927. BPF_MOV64_IMM(BPF_REG_0, 0),
  2928. BPF_EXIT_INSN(),
  2929. },
  2930. .errstr = "frame pointer is read only",
  2931. .result = REJECT,
  2932. },
  2933. {
  2934. "unpriv: spill/fill frame pointer",
  2935. .insns = {
  2936. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  2937. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  2938. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
  2939. BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
  2940. BPF_MOV64_IMM(BPF_REG_0, 0),
  2941. BPF_EXIT_INSN(),
  2942. },
  2943. .errstr = "frame pointer is read only",
  2944. .result = REJECT,
  2945. },
  2946. {
  2947. "unpriv: cmp of frame pointer",
  2948. .insns = {
  2949. BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
  2950. BPF_MOV64_IMM(BPF_REG_0, 0),
  2951. BPF_EXIT_INSN(),
  2952. },
  2953. .errstr_unpriv = "R10 pointer comparison",
  2954. .result_unpriv = REJECT,
  2955. .result = ACCEPT,
  2956. },
  2957. {
  2958. "unpriv: adding of fp",
  2959. .insns = {
  2960. BPF_MOV64_IMM(BPF_REG_0, 0),
  2961. BPF_MOV64_IMM(BPF_REG_1, 0),
  2962. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
  2963. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
  2964. BPF_EXIT_INSN(),
  2965. },
  2966. .result = ACCEPT,
  2967. },
  2968. {
  2969. "unpriv: cmp of stack pointer",
  2970. .insns = {
  2971. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  2972. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  2973. BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
  2974. BPF_MOV64_IMM(BPF_REG_0, 0),
  2975. BPF_EXIT_INSN(),
  2976. },
  2977. .errstr_unpriv = "R2 pointer comparison",
  2978. .result_unpriv = REJECT,
  2979. .result = ACCEPT,
  2980. },
  /* Runtime/JIT tail call tests: R3 = index into the prog-array map that
   * the fixup_prog1 entries patch in, R2 = map fd.  .retval checks the
   * value actually produced at run time (tail call taken vs. fall through). */
  /* Index 0 tail-calls a program that returns 42 exactly once. */
  2981. {
  2982. "runtime/jit: tail_call within bounds, prog once",
  2983. .insns = {
  2984. BPF_MOV64_IMM(BPF_REG_3, 0),
  2985. BPF_LD_MAP_FD(BPF_REG_2, 0),
  2986. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  2987. BPF_FUNC_tail_call),
  2988. BPF_MOV64_IMM(BPF_REG_0, 1),
  2989. BPF_EXIT_INSN(),
  2990. },
  2991. .fixup_prog1 = { 1 },
  2992. .result = ACCEPT,
  2993. .retval = 42,
  2994. },
  /* Index 1 points at a self-tail-calling program; the tail call limit
   * eventually stops it and the run returns 41. */
  2995. {
  2996. "runtime/jit: tail_call within bounds, prog loop",
  2997. .insns = {
  2998. BPF_MOV64_IMM(BPF_REG_3, 1),
  2999. BPF_LD_MAP_FD(BPF_REG_2, 0),
  3000. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3001. BPF_FUNC_tail_call),
  3002. BPF_MOV64_IMM(BPF_REG_0, 1),
  3003. BPF_EXIT_INSN(),
  3004. },
  3005. .fixup_prog1 = { 1 },
  3006. .result = ACCEPT,
  3007. .retval = 41,
  3008. },
  /* Index 2 is an empty slot: tail call is a no-op, program falls through
   * and returns its own R0 = 1. */
  3009. {
  3010. "runtime/jit: tail_call within bounds, no prog",
  3011. .insns = {
  3012. BPF_MOV64_IMM(BPF_REG_3, 2),
  3013. BPF_LD_MAP_FD(BPF_REG_2, 0),
  3014. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3015. BPF_FUNC_tail_call),
  3016. BPF_MOV64_IMM(BPF_REG_0, 1),
  3017. BPF_EXIT_INSN(),
  3018. },
  3019. .fixup_prog1 = { 1 },
  3020. .result = ACCEPT,
  3021. .retval = 1,
  3022. },
  /* Out-of-bounds index (256 >= max_entries): tail call must be skipped
   * at run time, not fault. */
  3023. {
  3024. "runtime/jit: tail_call out of bounds",
  3025. .insns = {
  3026. BPF_MOV64_IMM(BPF_REG_3, 256),
  3027. BPF_LD_MAP_FD(BPF_REG_2, 0),
  3028. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3029. BPF_FUNC_tail_call),
  3030. BPF_MOV64_IMM(BPF_REG_0, 2),
  3031. BPF_EXIT_INSN(),
  3032. },
  3033. .fixup_prog1 = { 1 },
  3034. .result = ACCEPT,
  3035. .retval = 2,
  3036. },
  /* Negative index: treated as out of bounds (index is used as u32),
   * tail call skipped. */
  3037. {
  3038. "runtime/jit: pass negative index to tail_call",
  3039. .insns = {
  3040. BPF_MOV64_IMM(BPF_REG_3, -1),
  3041. BPF_LD_MAP_FD(BPF_REG_2, 0),
  3042. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3043. BPF_FUNC_tail_call),
  3044. BPF_MOV64_IMM(BPF_REG_0, 2),
  3045. BPF_EXIT_INSN(),
  3046. },
  3047. .fixup_prog1 = { 1 },
  3048. .result = ACCEPT,
  3049. .retval = 2,
  3050. },
  /* Index with only bits above 32 set: helper truncates to u32, so this is
   * index 0 and the tail call is taken (privileged).  Note the map fd load
   * is patched at insn 2 because BPF_LD_IMM64 occupies two instructions. */
  3051. {
  3052. "runtime/jit: pass > 32bit index to tail_call",
  3053. .insns = {
  3054. BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
  3055. BPF_LD_MAP_FD(BPF_REG_2, 0),
  3056. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3057. BPF_FUNC_tail_call),
  3058. BPF_MOV64_IMM(BPF_REG_0, 2),
  3059. BPF_EXIT_INSN(),
  3060. },
  3061. .fixup_prog1 = { 2 },
  3062. .result = ACCEPT,
  3063. .retval = 42,
  3064. /* Verifier rewrite for unpriv skips tail call here. */
  3065. .retval_unpriv = 2,
  3066. },
  /* Mixed constant/variable arithmetic on a stack pointer: R7 = fp - 20,
   * then stores via R7 + R1(=4) + 4 and R7 + 8 + 4, both of which land
   * inside the frame, so the program is accepted. */
  3067. {
  3068. "stack pointer arithmetic",
  3069. .insns = {
  3070. BPF_MOV64_IMM(BPF_REG_1, 4),
  3071. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  3072. BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
  3073. BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
  3074. BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
  3075. BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
  3076. BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
  3077. BPF_ST_MEM(0, BPF_REG_2, 4, 0),
  3078. BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
  3079. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
  3080. BPF_ST_MEM(0, BPF_REG_2, 4, 0),
  3081. BPF_MOV64_IMM(BPF_REG_0, 0),
  3082. BPF_EXIT_INSN(),
  3083. },
  3084. .result = ACCEPT,
  3085. },
  /* raw_stack tests: bpf_skb_load_bytes(skb=R1, off=R2, to=R3, len=R4)
   * takes a pointer to (possibly uninitialized) stack memory in R3 and a
   * length in R4.  These entries probe length/offset validation and how
   * the helper call interacts with spilled registers on the stack. */
  /* Helper call omitted, so the stack slot is never written; the 8-byte
   * read from uninitialized stack must be rejected. */
  3086. {
  3087. "raw_stack: no skb_load_bytes",
  3088. .insns = {
  3089. BPF_MOV64_IMM(BPF_REG_2, 4),
  3090. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3091. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  3092. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3093. BPF_MOV64_IMM(BPF_REG_4, 8),
  3094. /* Call to skb_load_bytes() omitted. */
  3095. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3096. BPF_EXIT_INSN(),
  3097. },
  3098. .result = REJECT,
  3099. .errstr = "invalid read from stack off -8+0 size 8",
  3100. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3101. },
  /* Negative length in R4 is rejected before the helper can run. */
  3102. {
  3103. "raw_stack: skb_load_bytes, negative len",
  3104. .insns = {
  3105. BPF_MOV64_IMM(BPF_REG_2, 4),
  3106. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3107. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  3108. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3109. BPF_MOV64_IMM(BPF_REG_4, -8),
  3110. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3111. BPF_FUNC_skb_load_bytes),
  3112. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3113. BPF_EXIT_INSN(),
  3114. },
  3115. .result = REJECT,
  3116. .errstr = "R4 min value is negative",
  3117. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3118. },
  /* Same as above with len = ~0 (all bits set, i.e. -1). */
  3119. {
  3120. "raw_stack: skb_load_bytes, negative len 2",
  3121. .insns = {
  3122. BPF_MOV64_IMM(BPF_REG_2, 4),
  3123. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3124. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  3125. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3126. BPF_MOV64_IMM(BPF_REG_4, ~0),
  3127. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3128. BPF_FUNC_skb_load_bytes),
  3129. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3130. BPF_EXIT_INSN(),
  3131. },
  3132. .result = REJECT,
  3133. .errstr = "R4 min value is negative",
  3134. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3135. },
  /* Zero-sized access through the stack pointer argument is invalid. */
  3136. {
  3137. "raw_stack: skb_load_bytes, zero len",
  3138. .insns = {
  3139. BPF_MOV64_IMM(BPF_REG_2, 4),
  3140. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3141. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  3142. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3143. BPF_MOV64_IMM(BPF_REG_4, 0),
  3144. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3145. BPF_FUNC_skb_load_bytes),
  3146. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3147. BPF_EXIT_INSN(),
  3148. },
  3149. .result = REJECT,
  3150. .errstr = "invalid stack type R3",
  3151. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3152. },
  /* The helper may write into uninitialized stack, and afterwards the
   * slot is readable — accepted without a prior store. */
  3153. {
  3154. "raw_stack: skb_load_bytes, no init",
  3155. .insns = {
  3156. BPF_MOV64_IMM(BPF_REG_2, 4),
  3157. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3158. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  3159. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3160. BPF_MOV64_IMM(BPF_REG_4, 8),
  3161. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3162. BPF_FUNC_skb_load_bytes),
  3163. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3164. BPF_EXIT_INSN(),
  3165. },
  3166. .result = ACCEPT,
  3167. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3168. },
  /* Same, with the slot pre-initialized — also accepted. */
  3169. {
  3170. "raw_stack: skb_load_bytes, init",
  3171. .insns = {
  3172. BPF_MOV64_IMM(BPF_REG_2, 4),
  3173. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3174. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  3175. BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
  3176. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3177. BPF_MOV64_IMM(BPF_REG_4, 8),
  3178. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3179. BPF_FUNC_skb_load_bytes),
  3180. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3181. BPF_EXIT_INSN(),
  3182. },
  3183. .result = ACCEPT,
  3184. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3185. },
  /* Ctx pointer (R1) spilled just outside the 8-byte helper window keeps
   * its pointer type across the call, so the later ctx loads are valid. */
  3186. {
  3187. "raw_stack: skb_load_bytes, spilled regs around bounds",
  3188. .insns = {
  3189. BPF_MOV64_IMM(BPF_REG_2, 4),
  3190. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3191. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
  3192. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
  3193. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
  3194. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3195. BPF_MOV64_IMM(BPF_REG_4, 8),
  3196. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3197. BPF_FUNC_skb_load_bytes),
  3198. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
  3199. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
  3200. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  3201. offsetof(struct __sk_buff, mark)),
  3202. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
  3203. offsetof(struct __sk_buff, priority)),
  3204. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
  3205. BPF_EXIT_INSN(),
  3206. },
  3207. .result = ACCEPT,
  3208. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3209. },
  /* A spill INSIDE the window is clobbered by the helper: the fill yields
   * an unknown scalar, so dereferencing it as ctx is rejected. */
  3210. {
  3211. "raw_stack: skb_load_bytes, spilled regs corruption",
  3212. .insns = {
  3213. BPF_MOV64_IMM(BPF_REG_2, 4),
  3214. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3215. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
  3216. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  3217. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3218. BPF_MOV64_IMM(BPF_REG_4, 8),
  3219. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3220. BPF_FUNC_skb_load_bytes),
  3221. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3222. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  3223. offsetof(struct __sk_buff, mark)),
  3224. BPF_EXIT_INSN(),
  3225. },
  3226. .result = REJECT,
  3227. .errstr = "R0 invalid mem access 'inv'",
  3228. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3229. },
  /* Spills at -8 and +8 survive, but the middle slot (inside the window)
   * is clobbered; dereferencing R3 filled from it is rejected. */
  3230. {
  3231. "raw_stack: skb_load_bytes, spilled regs corruption 2",
  3232. .insns = {
  3233. BPF_MOV64_IMM(BPF_REG_2, 4),
  3234. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3235. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
  3236. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
  3237. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  3238. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
  3239. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3240. BPF_MOV64_IMM(BPF_REG_4, 8),
  3241. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3242. BPF_FUNC_skb_load_bytes),
  3243. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
  3244. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
  3245. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
  3246. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  3247. offsetof(struct __sk_buff, mark)),
  3248. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
  3249. offsetof(struct __sk_buff, priority)),
  3250. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
  3251. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
  3252. offsetof(struct __sk_buff, pkt_type)),
  3253. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
  3254. BPF_EXIT_INSN(),
  3255. },
  3256. .result = REJECT,
  3257. .errstr = "R3 invalid mem access 'inv'",
  3258. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3259. },
  /* Like the previous test, but the clobbered middle slot's value is only
   * used as a scalar (added to R0), never dereferenced — accepted. */
  3260. {
  3261. "raw_stack: skb_load_bytes, spilled regs + data",
  3262. .insns = {
  3263. BPF_MOV64_IMM(BPF_REG_2, 4),
  3264. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3265. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
  3266. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
  3267. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
  3268. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
  3269. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3270. BPF_MOV64_IMM(BPF_REG_4, 8),
  3271. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3272. BPF_FUNC_skb_load_bytes),
  3273. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
  3274. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
  3275. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
  3276. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  3277. offsetof(struct __sk_buff, mark)),
  3278. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
  3279. offsetof(struct __sk_buff, priority)),
  3280. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
  3281. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
  3282. BPF_EXIT_INSN(),
  3283. },
  3284. .result = ACCEPT,
  3285. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3286. },
  /* Buffer offset -513 is below the 512-byte stack frame. */
  3287. {
  3288. "raw_stack: skb_load_bytes, invalid access 1",
  3289. .insns = {
  3290. BPF_MOV64_IMM(BPF_REG_2, 4),
  3291. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3292. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
  3293. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3294. BPF_MOV64_IMM(BPF_REG_4, 8),
  3295. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3296. BPF_FUNC_skb_load_bytes),
  3297. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3298. BPF_EXIT_INSN(),
  3299. },
  3300. .result = REJECT,
  3301. .errstr = "invalid stack type R3 off=-513 access_size=8",
  3302. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3303. },
  /* Offset -1 with 8-byte write would run past the top of the frame. */
  3304. {
  3305. "raw_stack: skb_load_bytes, invalid access 2",
  3306. .insns = {
  3307. BPF_MOV64_IMM(BPF_REG_2, 4),
  3308. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3309. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
  3310. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3311. BPF_MOV64_IMM(BPF_REG_4, 8),
  3312. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3313. BPF_FUNC_skb_load_bytes),
  3314. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3315. BPF_EXIT_INSN(),
  3316. },
  3317. .result = REJECT,
  3318. .errstr = "invalid stack type R3 off=-1 access_size=8",
  3319. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3320. },
  /* 0xffffffff immediate sign-extends to -1: negative length rejected. */
  3321. {
  3322. "raw_stack: skb_load_bytes, invalid access 3",
  3323. .insns = {
  3324. BPF_MOV64_IMM(BPF_REG_2, 4),
  3325. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3326. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
  3327. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3328. BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
  3329. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3330. BPF_FUNC_skb_load_bytes),
  3331. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3332. BPF_EXIT_INSN(),
  3333. },
  3334. .result = REJECT,
  3335. .errstr = "R4 min value is negative",
  3336. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3337. },
  /* Huge positive length: unbounded memory access rejected. */
  3338. {
  3339. "raw_stack: skb_load_bytes, invalid access 4",
  3340. .insns = {
  3341. BPF_MOV64_IMM(BPF_REG_2, 4),
  3342. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3343. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
  3344. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3345. BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
  3346. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3347. BPF_FUNC_skb_load_bytes),
  3348. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3349. BPF_EXIT_INSN(),
  3350. },
  3351. .result = REJECT,
  3352. .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
  3353. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3354. },
  /* Same huge length even from a valid -512 base is still unbounded. */
  3355. {
  3356. "raw_stack: skb_load_bytes, invalid access 5",
  3357. .insns = {
  3358. BPF_MOV64_IMM(BPF_REG_2, 4),
  3359. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3360. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
  3361. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3362. BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
  3363. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3364. BPF_FUNC_skb_load_bytes),
  3365. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3366. BPF_EXIT_INSN(),
  3367. },
  3368. .result = REJECT,
  3369. .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
  3370. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3371. },
  /* Zero-sized access at the very bottom of the frame is invalid. */
  3372. {
  3373. "raw_stack: skb_load_bytes, invalid access 6",
  3374. .insns = {
  3375. BPF_MOV64_IMM(BPF_REG_2, 4),
  3376. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3377. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
  3378. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3379. BPF_MOV64_IMM(BPF_REG_4, 0),
  3380. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3381. BPF_FUNC_skb_load_bytes),
  3382. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3383. BPF_EXIT_INSN(),
  3384. },
  3385. .result = REJECT,
  3386. .errstr = "invalid stack type R3 off=-512 access_size=0",
  3387. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3388. },
  /* 512 bytes starting at -512 exactly fills the frame — accepted. */
  3389. {
  3390. "raw_stack: skb_load_bytes, large access",
  3391. .insns = {
  3392. BPF_MOV64_IMM(BPF_REG_2, 4),
  3393. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
  3394. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
  3395. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  3396. BPF_MOV64_IMM(BPF_REG_4, 512),
  3397. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  3398. BPF_FUNC_skb_load_bytes),
  3399. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  3400. BPF_EXIT_INSN(),
  3401. },
  3402. .result = ACCEPT,
  3403. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3404. },
  /* Stores into the context (R1 = struct __sk_buff) are only allowed via
   * BPF_STX for writable fields; BPF_ST and BPF_XADD forms are rejected. */
  3405. {
  3406. "context stores via ST",
  3407. .insns = {
  3408. BPF_MOV64_IMM(BPF_REG_0, 0),
  3409. BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
  3410. BPF_EXIT_INSN(),
  3411. },
  3412. .errstr = "BPF_ST stores into R1 ctx is not allowed",
  3413. .result = REJECT,
  3414. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3415. },
  /* Atomic add into ctx memory is likewise forbidden. */
  3416. {
  3417. "context stores via XADD",
  3418. .insns = {
  3419. BPF_MOV64_IMM(BPF_REG_0, 0),
  3420. BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
  3421. BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
  3422. BPF_EXIT_INSN(),
  3423. },
  3424. .errstr = "BPF_XADD stores into R1 ctx is not allowed",
  3425. .result = REJECT,
  3426. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3427. },
  /* Direct packet access tests: load skb->data / skb->data_end, prove
   * bounds with a compare, then read/write packet bytes directly.
   * Accesses on paths where the bound was NOT established must be
   * rejected with "invalid access to packet". */
  /* test1: canonical pattern — data + 8 checked against data_end, then a
   * one-byte load inside the proven range. */
  3428. {
  3429. "direct packet access: test1",
  3430. .insns = {
  3431. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3432. offsetof(struct __sk_buff, data)),
  3433. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3434. offsetof(struct __sk_buff, data_end)),
  3435. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3436. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3437. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  3438. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  3439. BPF_MOV64_IMM(BPF_REG_0, 0),
  3440. BPF_EXIT_INSN(),
  3441. },
  3442. .result = ACCEPT,
  3443. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3444. },
  /* test2: variable offset built from a packet byte and skb->len masked
   * by shifts, re-checked against data_end before the final load. */
  3445. {
  3446. "direct packet access: test2",
  3447. .insns = {
  3448. BPF_MOV64_IMM(BPF_REG_0, 1),
  3449. BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
  3450. offsetof(struct __sk_buff, data_end)),
  3451. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3452. offsetof(struct __sk_buff, data)),
  3453. BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
  3454. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
  3455. BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
  3456. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
  3457. BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
  3458. BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
  3459. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3460. offsetof(struct __sk_buff, data)),
  3461. BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
  3462. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3463. offsetof(struct __sk_buff, len)),
  3464. BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
  3465. BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
  3466. BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
  3467. BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
  3468. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
  3469. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  3470. offsetof(struct __sk_buff, data_end)),
  3471. BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
  3472. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
  3473. BPF_MOV64_IMM(BPF_REG_0, 0),
  3474. BPF_EXIT_INSN(),
  3475. },
  3476. .result = ACCEPT,
  3477. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3478. },
  /* test3: socket filters may not read skb->data (ctx off=76). */
  3479. {
  3480. "direct packet access: test3",
  3481. .insns = {
  3482. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3483. offsetof(struct __sk_buff, data)),
  3484. BPF_MOV64_IMM(BPF_REG_0, 0),
  3485. BPF_EXIT_INSN(),
  3486. },
  3487. .errstr = "invalid bpf_context access off=76",
  3488. .result = REJECT,
  3489. .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
  3490. },
  /* test4: a checked packet WRITE is allowed for SCHED_CLS. */
  3491. {
  3492. "direct packet access: test4 (write)",
  3493. .insns = {
  3494. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3495. offsetof(struct __sk_buff, data)),
  3496. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3497. offsetof(struct __sk_buff, data_end)),
  3498. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3499. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3500. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  3501. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  3502. BPF_MOV64_IMM(BPF_REG_0, 0),
  3503. BPF_EXIT_INSN(),
  3504. },
  3505. .result = ACCEPT,
  3506. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3507. },
  /* test5: reversed compare (pkt_end >= reg) with access on the branch
   * where the bound holds. */
  3508. {
  3509. "direct packet access: test5 (pkt_end >= reg, good access)",
  3510. .insns = {
  3511. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3512. offsetof(struct __sk_buff, data)),
  3513. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3514. offsetof(struct __sk_buff, data_end)),
  3515. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3516. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3517. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
  3518. BPF_MOV64_IMM(BPF_REG_0, 1),
  3519. BPF_EXIT_INSN(),
  3520. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  3521. BPF_MOV64_IMM(BPF_REG_0, 0),
  3522. BPF_EXIT_INSN(),
  3523. },
  3524. .result = ACCEPT,
  3525. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3526. },
  /* test6: access sits on the fall-through path where the bound does NOT
   * hold — rejected. */
  3527. {
  3528. "direct packet access: test6 (pkt_end >= reg, bad access)",
  3529. .insns = {
  3530. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3531. offsetof(struct __sk_buff, data)),
  3532. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3533. offsetof(struct __sk_buff, data_end)),
  3534. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3535. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3536. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
  3537. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  3538. BPF_MOV64_IMM(BPF_REG_0, 1),
  3539. BPF_EXIT_INSN(),
  3540. BPF_MOV64_IMM(BPF_REG_0, 0),
  3541. BPF_EXIT_INSN(),
  3542. },
  3543. .errstr = "invalid access to packet",
  3544. .result = REJECT,
  3545. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3546. },
  /* test7: accesses on BOTH branches — the unchecked one is rejected. */
  3547. {
  3548. "direct packet access: test7 (pkt_end >= reg, both accesses)",
  3549. .insns = {
  3550. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3551. offsetof(struct __sk_buff, data)),
  3552. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3553. offsetof(struct __sk_buff, data_end)),
  3554. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3555. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3556. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
  3557. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  3558. BPF_MOV64_IMM(BPF_REG_0, 1),
  3559. BPF_EXIT_INSN(),
  3560. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  3561. BPF_MOV64_IMM(BPF_REG_0, 0),
  3562. BPF_EXIT_INSN(),
  3563. },
  3564. .errstr = "invalid access to packet",
  3565. .result = REJECT,
  3566. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3567. },
  /* test8: two complementary bounds checks; both paths that access are
   * properly guarded — accepted. */
  3568. {
  3569. "direct packet access: test8 (double test, variant 1)",
  3570. .insns = {
  3571. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3572. offsetof(struct __sk_buff, data)),
  3573. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3574. offsetof(struct __sk_buff, data_end)),
  3575. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3576. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3577. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
  3578. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  3579. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  3580. BPF_MOV64_IMM(BPF_REG_0, 1),
  3581. BPF_EXIT_INSN(),
  3582. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  3583. BPF_MOV64_IMM(BPF_REG_0, 0),
  3584. BPF_EXIT_INSN(),
  3585. },
  3586. .result = ACCEPT,
  3587. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3588. },
  /* test9: same idea with the checks ordered differently — accepted. */
  3589. {
  3590. "direct packet access: test9 (double test, variant 2)",
  3591. .insns = {
  3592. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3593. offsetof(struct __sk_buff, data)),
  3594. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3595. offsetof(struct __sk_buff, data_end)),
  3596. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3597. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3598. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
  3599. BPF_MOV64_IMM(BPF_REG_0, 1),
  3600. BPF_EXIT_INSN(),
  3601. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  3602. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  3603. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  3604. BPF_MOV64_IMM(BPF_REG_0, 0),
  3605. BPF_EXIT_INSN(),
  3606. },
  3607. .result = ACCEPT,
  3608. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3609. },
  /* test10: write on the path where the bound FAILED — rejected. */
  3610. {
  3611. "direct packet access: test10 (write invalid)",
  3612. .insns = {
  3613. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3614. offsetof(struct __sk_buff, data)),
  3615. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3616. offsetof(struct __sk_buff, data_end)),
  3617. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3618. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3619. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
  3620. BPF_MOV64_IMM(BPF_REG_0, 0),
  3621. BPF_EXIT_INSN(),
  3622. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  3623. BPF_MOV64_IMM(BPF_REG_0, 0),
  3624. BPF_EXIT_INSN(),
  3625. },
  3626. .errstr = "invalid access to packet",
  3627. .result = REJECT,
  3628. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3629. },
  /* test11: offset derived via right shift stays within the 22 bytes
   * proven available — accepted, runs to retval 1. */
  3630. {
  3631. "direct packet access: test11 (shift, good access)",
  3632. .insns = {
  3633. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3634. offsetof(struct __sk_buff, data)),
  3635. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3636. offsetof(struct __sk_buff, data_end)),
  3637. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3638. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
  3639. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
  3640. BPF_MOV64_IMM(BPF_REG_3, 144),
  3641. BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
  3642. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
  3643. BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
  3644. BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
  3645. BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
  3646. BPF_MOV64_IMM(BPF_REG_0, 1),
  3647. BPF_EXIT_INSN(),
  3648. BPF_MOV64_IMM(BPF_REG_0, 0),
  3649. BPF_EXIT_INSN(),
  3650. },
  3651. .result = ACCEPT,
  3652. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3653. .retval = 1,
  3654. },
  /* test12: offset bounded with AND mask instead of a shift — accepted. */
  3655. {
  3656. "direct packet access: test12 (and, good access)",
  3657. .insns = {
  3658. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3659. offsetof(struct __sk_buff, data)),
  3660. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3661. offsetof(struct __sk_buff, data_end)),
  3662. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3663. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
  3664. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
  3665. BPF_MOV64_IMM(BPF_REG_3, 144),
  3666. BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
  3667. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
  3668. BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
  3669. BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
  3670. BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
  3671. BPF_MOV64_IMM(BPF_REG_0, 1),
  3672. BPF_EXIT_INSN(),
  3673. BPF_MOV64_IMM(BPF_REG_0, 0),
  3674. BPF_EXIT_INSN(),
  3675. },
  3676. .result = ACCEPT,
  3677. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3678. .retval = 1,
  3679. },
  /* test13: offset chosen by branches (14 or 24), then masked to stay in
   * bounds — accepted. */
  3680. {
  3681. "direct packet access: test13 (branches, good access)",
  3682. .insns = {
  3683. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3684. offsetof(struct __sk_buff, data)),
  3685. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3686. offsetof(struct __sk_buff, data_end)),
  3687. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3688. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
  3689. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
  3690. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3691. offsetof(struct __sk_buff, mark)),
  3692. BPF_MOV64_IMM(BPF_REG_4, 1),
  3693. BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
  3694. BPF_MOV64_IMM(BPF_REG_3, 14),
  3695. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  3696. BPF_MOV64_IMM(BPF_REG_3, 24),
  3697. BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
  3698. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
  3699. BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
  3700. BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
  3701. BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
  3702. BPF_MOV64_IMM(BPF_REG_0, 1),
  3703. BPF_EXIT_INSN(),
  3704. BPF_MOV64_IMM(BPF_REG_0, 0),
  3705. BPF_EXIT_INSN(),
  3706. },
  3707. .result = ACCEPT,
  3708. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3709. .retval = 1,
  3710. },
  /* test14: 12 >> 4 == 0, so pkt_ptr += known-zero constant — accepted. */
  3711. {
  3712. "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
  3713. .insns = {
  3714. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3715. offsetof(struct __sk_buff, data)),
  3716. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3717. offsetof(struct __sk_buff, data_end)),
  3718. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3719. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
  3720. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
  3721. BPF_MOV64_IMM(BPF_REG_5, 12),
  3722. BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
  3723. BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
  3724. BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
  3725. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
  3726. BPF_MOV64_IMM(BPF_REG_0, 1),
  3727. BPF_EXIT_INSN(),
  3728. BPF_MOV64_IMM(BPF_REG_0, 0),
  3729. BPF_EXIT_INSN(),
  3730. },
  3731. .result = ACCEPT,
  3732. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3733. .retval = 1,
  3734. },
  /* test15: xadd on a stack slot holding a spilled pkt pointer turns the
   * slot unknown; the filled value may not be dereferenced. */
  3735. {
  3736. "direct packet access: test15 (spill with xadd)",
  3737. .insns = {
  3738. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3739. offsetof(struct __sk_buff, data)),
  3740. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3741. offsetof(struct __sk_buff, data_end)),
  3742. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3743. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3744. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
  3745. BPF_MOV64_IMM(BPF_REG_5, 4096),
  3746. BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
  3747. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
  3748. BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
  3749. BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
  3750. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
  3751. BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
  3752. BPF_MOV64_IMM(BPF_REG_0, 0),
  3753. BPF_EXIT_INSN(),
  3754. },
  3755. .errstr = "R2 invalid mem access 'inv'",
  3756. .result = REJECT,
  3757. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3758. },
  /* test16: pointer arithmetic directly on data_end is forbidden. */
  3759. {
  3760. "direct packet access: test16 (arith on data_end)",
  3761. .insns = {
  3762. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3763. offsetof(struct __sk_buff, data)),
  3764. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3765. offsetof(struct __sk_buff, data_end)),
  3766. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3767. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3768. BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
  3769. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  3770. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  3771. BPF_MOV64_IMM(BPF_REG_0, 0),
  3772. BPF_EXIT_INSN(),
  3773. },
  3774. .errstr = "R3 pointer arithmetic on pkt_end",
  3775. .result = REJECT,
  3776. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3777. },
  /* test17: under strict alignment, state pruning must not hide the
   * misaligned 4-byte store at pkt + 15 - 4 reached via the back edge. */
  3778. {
  3779. "direct packet access: test17 (pruning, alignment)",
  3780. .insns = {
  3781. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3782. offsetof(struct __sk_buff, data)),
  3783. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3784. offsetof(struct __sk_buff, data_end)),
  3785. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  3786. offsetof(struct __sk_buff, mark)),
  3787. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3788. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
  3789. BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
  3790. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  3791. BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
  3792. BPF_MOV64_IMM(BPF_REG_0, 0),
  3793. BPF_EXIT_INSN(),
  3794. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
  3795. BPF_JMP_A(-6),
  3796. },
  3797. .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
  3798. .result = REJECT,
  3799. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3800. .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
  3801. },
  /* test18: imm += pkt_ptr (commuted operand order) still tracked as a
   * packet pointer and bounds-checked — accepted. */
  3802. {
  3803. "direct packet access: test18 (imm += pkt_ptr, 1)",
  3804. .insns = {
  3805. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3806. offsetof(struct __sk_buff, data)),
  3807. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3808. offsetof(struct __sk_buff, data_end)),
  3809. BPF_MOV64_IMM(BPF_REG_0, 8),
  3810. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
  3811. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  3812. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  3813. BPF_MOV64_IMM(BPF_REG_0, 0),
  3814. BPF_EXIT_INSN(),
  3815. },
  3816. .result = ACCEPT,
  3817. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3818. },
  /* test19: imm(4) += pkt_ptr after the range is proven — write at +4 is
   * within the checked 8 bytes — accepted. */
  3819. {
  3820. "direct packet access: test19 (imm += pkt_ptr, 2)",
  3821. .insns = {
  3822. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3823. offsetof(struct __sk_buff, data)),
  3824. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3825. offsetof(struct __sk_buff, data_end)),
  3826. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3827. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3828. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
  3829. BPF_MOV64_IMM(BPF_REG_4, 4),
  3830. BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
  3831. BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
  3832. BPF_MOV64_IMM(BPF_REG_0, 0),
  3833. BPF_EXIT_INSN(),
  3834. },
  3835. .result = ACCEPT,
  3836. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3837. },
  /* test20: unknown scalar masked to [0, 0x7fff] added to pkt_ptr, then
   * the result is range-checked before the store — accepted. */
  3838. {
  3839. "direct packet access: test20 (x += pkt_ptr, 1)",
  3840. .insns = {
  3841. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3842. offsetof(struct __sk_buff, data)),
  3843. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3844. offsetof(struct __sk_buff, data_end)),
  3845. BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
  3846. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  3847. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  3848. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
  3849. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  3850. BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
  3851. BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
  3852. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
  3853. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
  3854. BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
  3855. BPF_MOV64_IMM(BPF_REG_0, 0),
  3856. BPF_EXIT_INSN(),
  3857. },
  3858. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3859. .result = ACCEPT,
  3860. },
  /* test21: same masked-scalar pattern inside an already-checked region
   * — accepted. */
  3861. {
  3862. "direct packet access: test21 (x += pkt_ptr, 2)",
  3863. .insns = {
  3864. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3865. offsetof(struct __sk_buff, data)),
  3866. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3867. offsetof(struct __sk_buff, data_end)),
  3868. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3869. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3870. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
  3871. BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
  3872. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
  3873. BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
  3874. BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
  3875. BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
  3876. BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
  3877. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
  3878. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
  3879. BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
  3880. BPF_MOV64_IMM(BPF_REG_0, 0),
  3881. BPF_EXIT_INSN(),
  3882. },
  3883. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3884. .result = ACCEPT,
  3885. },
  /* test22: spill/fill of pkt pointers plus an xadd-mangled scalar whose
   * range is bounded by a 49-bit right shift before the add — accepted. */
  3886. {
  3887. "direct packet access: test22 (x += pkt_ptr, 3)",
  3888. .insns = {
  3889. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3890. offsetof(struct __sk_buff, data)),
  3891. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3892. offsetof(struct __sk_buff, data_end)),
  3893. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3894. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3895. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
  3896. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
  3897. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
  3898. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
  3899. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
  3900. BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
  3901. BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
  3902. BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
  3903. BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
  3904. BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
  3905. BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
  3906. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
  3907. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
  3908. BPF_MOV64_IMM(BPF_REG_2, 1),
  3909. BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
  3910. BPF_MOV64_IMM(BPF_REG_0, 0),
  3911. BPF_EXIT_INSN(),
  3912. },
  3913. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3914. .result = ACCEPT,
  3915. },
  3916. {
  3917. "direct packet access: test23 (x += pkt_ptr, 4)",
  3918. .insns = {
  3919. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3920. offsetof(struct __sk_buff, data)),
  3921. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3922. offsetof(struct __sk_buff, data_end)),
  3923. BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
  3924. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  3925. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  3926. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
  3927. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  3928. BPF_MOV64_IMM(BPF_REG_0, 31),
  3929. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
  3930. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
  3931. BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
  3932. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
  3933. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  3934. BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
  3935. BPF_MOV64_IMM(BPF_REG_0, 0),
  3936. BPF_EXIT_INSN(),
  3937. },
  3938. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3939. .result = REJECT,
  3940. .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
  3941. },
  3942. {
  3943. "direct packet access: test24 (x += pkt_ptr, 5)",
  3944. .insns = {
  3945. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3946. offsetof(struct __sk_buff, data)),
  3947. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3948. offsetof(struct __sk_buff, data_end)),
  3949. BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
  3950. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  3951. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
  3952. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
  3953. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  3954. BPF_MOV64_IMM(BPF_REG_0, 64),
  3955. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
  3956. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
  3957. BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
  3958. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
  3959. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  3960. BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
  3961. BPF_MOV64_IMM(BPF_REG_0, 0),
  3962. BPF_EXIT_INSN(),
  3963. },
  3964. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3965. .result = ACCEPT,
  3966. },
  /*
   * Direct packet access, branch-marking tests: verify the verifier marks
   * packet range knowledge on the correct arm of BPF_JLT / BPF_JLE
   * comparisons against data_end.  "good access" variants read inside the
   * proven range (ACCEPT); "bad access" variants read on the arm where no
   * range was established (REJECT).
   */
  /* test25: JLT taken branch proves pkt+8 < data_end, so the 1-byte load
   * at the jump target is safe.
   */
  3967. {
  3968. "direct packet access: test25 (marking on <, good access)",
  3969. .insns = {
  3970. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3971. offsetof(struct __sk_buff, data)),
  3972. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3973. offsetof(struct __sk_buff, data_end)),
  3974. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3975. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3976. BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
  3977. BPF_MOV64_IMM(BPF_REG_0, 0),
  3978. BPF_EXIT_INSN(),
  3979. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  3980. BPF_JMP_IMM(BPF_JA, 0, 0, -4),
  3981. },
  3982. .result = ACCEPT,
  3983. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  3984. },
  /* test26: the load sits on the fall-through arm of the JLT, where the
   * range is NOT proven -> REJECT.
   */
  3985. {
  3986. "direct packet access: test26 (marking on <, bad access)",
  3987. .insns = {
  3988. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  3989. offsetof(struct __sk_buff, data)),
  3990. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  3991. offsetof(struct __sk_buff, data_end)),
  3992. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  3993. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  3994. BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
  3995. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  3996. BPF_MOV64_IMM(BPF_REG_0, 0),
  3997. BPF_EXIT_INSN(),
  3998. BPF_JMP_IMM(BPF_JA, 0, 0, -3),
  3999. },
  4000. .result = REJECT,
  4001. .errstr = "invalid access to packet",
  4002. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4003. },
  /* test27: data_end <= pkt+8 not taken implies pkt+8 > data_end is false,
   * i.e. the fall-through arm has the range -> ACCEPT, returns 1.
   */
  4004. {
  4005. "direct packet access: test27 (marking on <=, good access)",
  4006. .insns = {
  4007. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4008. offsetof(struct __sk_buff, data)),
  4009. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4010. offsetof(struct __sk_buff, data_end)),
  4011. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  4012. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  4013. BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
  4014. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  4015. BPF_MOV64_IMM(BPF_REG_0, 1),
  4016. BPF_EXIT_INSN(),
  4017. },
  4018. .result = ACCEPT,
  4019. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4020. .retval = 1,
  4021. },
  /* test28: the load is reached via the taken arm of JLE (data_end may be
   * <= pkt+8), so no range is known -> REJECT.
   */
  4022. {
  4023. "direct packet access: test28 (marking on <=, bad access)",
  4024. .insns = {
  4025. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4026. offsetof(struct __sk_buff, data)),
  4027. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4028. offsetof(struct __sk_buff, data_end)),
  4029. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  4030. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  4031. BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
  4032. BPF_MOV64_IMM(BPF_REG_0, 1),
  4033. BPF_EXIT_INSN(),
  4034. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  4035. BPF_JMP_IMM(BPF_JA, 0, 0, -4),
  4036. },
  4037. .result = REJECT,
  4038. .errstr = "invalid access to packet",
  4039. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4040. },
  /*
   * Helper access to packet, tests 1-10: packet pointers passed as map
   * helper arguments (map_update_elem / map_lookup_elem).  Tests 1-5 run
   * under XDP, tests 6-10 repeat the same patterns under SCHED_CLS.  The
   * helper may only receive a packet pointer whose verified range covers
   * the key size (8 bytes for the fixup_map_hash_8b map).  The index in
   * each .fixup_map_* array is the insn position of BPF_LD_MAP_FD to patch.
   */
  /* test1: 8-byte range proven before the ptr is passed as the key -> ACCEPT. */
  4041. {
  4042. "helper access to packet: test1, valid packet_ptr range",
  4043. .insns = {
  4044. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4045. offsetof(struct xdp_md, data)),
  4046. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4047. offsetof(struct xdp_md, data_end)),
  4048. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  4049. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  4050. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
  4051. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4052. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  4053. BPF_MOV64_IMM(BPF_REG_4, 0),
  4054. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4055. BPF_FUNC_map_update_elem),
  4056. BPF_MOV64_IMM(BPF_REG_0, 0),
  4057. BPF_EXIT_INSN(),
  4058. },
  4059. .fixup_map_hash_8b = { 5 },
  4060. .result_unpriv = ACCEPT,
  4061. .result = ACCEPT,
  4062. .prog_type = BPF_PROG_TYPE_XDP,
  4063. },
  /* test2: packet ptr used as key with no bounds check at all -> REJECT. */
  4064. {
  4065. "helper access to packet: test2, unchecked packet_ptr",
  4066. .insns = {
  4067. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4068. offsetof(struct xdp_md, data)),
  4069. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4070. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4071. BPF_FUNC_map_lookup_elem),
  4072. BPF_MOV64_IMM(BPF_REG_0, 0),
  4073. BPF_EXIT_INSN(),
  4074. },
  4075. .fixup_map_hash_8b = { 1 },
  4076. .result = REJECT,
  4077. .errstr = "invalid access to packet",
  4078. .prog_type = BPF_PROG_TYPE_XDP,
  4079. },
  /* test3: ptr advanced by a byte loaded from the packet, then re-checked
   * for 8 bytes before the call -> ACCEPT.
   */
  4080. {
  4081. "helper access to packet: test3, variable add",
  4082. .insns = {
  4083. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4084. offsetof(struct xdp_md, data)),
  4085. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4086. offsetof(struct xdp_md, data_end)),
  4087. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  4088. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
  4089. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
  4090. BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
  4091. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  4092. BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
  4093. BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
  4094. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
  4095. BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
  4096. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4097. BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
  4098. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4099. BPF_FUNC_map_lookup_elem),
  4100. BPF_MOV64_IMM(BPF_REG_0, 0),
  4101. BPF_EXIT_INSN(),
  4102. },
  4103. .fixup_map_hash_8b = { 11 },
  4104. .result = ACCEPT,
  4105. .prog_type = BPF_PROG_TYPE_XDP,
  4106. },
  /* test4: only 4 bytes proven but the 8-byte key is required -> REJECT. */
  4107. {
  4108. "helper access to packet: test4, packet_ptr with bad range",
  4109. .insns = {
  4110. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4111. offsetof(struct xdp_md, data)),
  4112. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4113. offsetof(struct xdp_md, data_end)),
  4114. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  4115. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  4116. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
  4117. BPF_MOV64_IMM(BPF_REG_0, 0),
  4118. BPF_EXIT_INSN(),
  4119. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4120. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4121. BPF_FUNC_map_lookup_elem),
  4122. BPF_MOV64_IMM(BPF_REG_0, 0),
  4123. BPF_EXIT_INSN(),
  4124. },
  4125. .fixup_map_hash_8b = { 7 },
  4126. .result = REJECT,
  4127. .errstr = "invalid access to packet",
  4128. .prog_type = BPF_PROG_TYPE_XDP,
  4129. },
  /* test5: ptr bumped by 1 first, then only pkt+1..pkt+8 checked, leaving a
   * 7-byte proven range for an 8-byte key -> REJECT.
   */
  4130. {
  4131. "helper access to packet: test5, packet_ptr with too short range",
  4132. .insns = {
  4133. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4134. offsetof(struct xdp_md, data)),
  4135. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4136. offsetof(struct xdp_md, data_end)),
  4137. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
  4138. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  4139. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
  4140. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
  4141. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4142. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4143. BPF_FUNC_map_lookup_elem),
  4144. BPF_MOV64_IMM(BPF_REG_0, 0),
  4145. BPF_EXIT_INSN(),
  4146. },
  4147. .fixup_map_hash_8b = { 6 },
  4148. .result = REJECT,
  4149. .errstr = "invalid access to packet",
  4150. .prog_type = BPF_PROG_TYPE_XDP,
  4151. },
  /* test6: same as test1 but on __sk_buff / SCHED_CLS -> ACCEPT. */
  4152. {
  4153. "helper access to packet: test6, cls valid packet_ptr range",
  4154. .insns = {
  4155. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4156. offsetof(struct __sk_buff, data)),
  4157. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4158. offsetof(struct __sk_buff, data_end)),
  4159. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  4160. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  4161. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
  4162. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4163. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  4164. BPF_MOV64_IMM(BPF_REG_4, 0),
  4165. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4166. BPF_FUNC_map_update_elem),
  4167. BPF_MOV64_IMM(BPF_REG_0, 0),
  4168. BPF_EXIT_INSN(),
  4169. },
  4170. .fixup_map_hash_8b = { 5 },
  4171. .result = ACCEPT,
  4172. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4173. },
  /* test7: same as test2 under SCHED_CLS -> REJECT. */
  4174. {
  4175. "helper access to packet: test7, cls unchecked packet_ptr",
  4176. .insns = {
  4177. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4178. offsetof(struct __sk_buff, data)),
  4179. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4180. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4181. BPF_FUNC_map_lookup_elem),
  4182. BPF_MOV64_IMM(BPF_REG_0, 0),
  4183. BPF_EXIT_INSN(),
  4184. },
  4185. .fixup_map_hash_8b = { 1 },
  4186. .result = REJECT,
  4187. .errstr = "invalid access to packet",
  4188. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4189. },
  /* test8: same as test3 under SCHED_CLS -> ACCEPT. */
  4190. {
  4191. "helper access to packet: test8, cls variable add",
  4192. .insns = {
  4193. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4194. offsetof(struct __sk_buff, data)),
  4195. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4196. offsetof(struct __sk_buff, data_end)),
  4197. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  4198. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
  4199. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
  4200. BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
  4201. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  4202. BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
  4203. BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
  4204. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
  4205. BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
  4206. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4207. BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
  4208. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4209. BPF_FUNC_map_lookup_elem),
  4210. BPF_MOV64_IMM(BPF_REG_0, 0),
  4211. BPF_EXIT_INSN(),
  4212. },
  4213. .fixup_map_hash_8b = { 11 },
  4214. .result = ACCEPT,
  4215. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4216. },
  /* test9: same as test4 under SCHED_CLS -> REJECT. */
  4217. {
  4218. "helper access to packet: test9, cls packet_ptr with bad range",
  4219. .insns = {
  4220. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4221. offsetof(struct __sk_buff, data)),
  4222. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4223. offsetof(struct __sk_buff, data_end)),
  4224. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  4225. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
  4226. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
  4227. BPF_MOV64_IMM(BPF_REG_0, 0),
  4228. BPF_EXIT_INSN(),
  4229. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4230. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4231. BPF_FUNC_map_lookup_elem),
  4232. BPF_MOV64_IMM(BPF_REG_0, 0),
  4233. BPF_EXIT_INSN(),
  4234. },
  4235. .fixup_map_hash_8b = { 7 },
  4236. .result = REJECT,
  4237. .errstr = "invalid access to packet",
  4238. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4239. },
  /* test10: same as test5 under SCHED_CLS -> REJECT. */
  4240. {
  4241. "helper access to packet: test10, cls packet_ptr with too short range",
  4242. .insns = {
  4243. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4244. offsetof(struct __sk_buff, data)),
  4245. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4246. offsetof(struct __sk_buff, data_end)),
  4247. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
  4248. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
  4249. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
  4250. BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
  4251. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4252. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4253. BPF_FUNC_map_lookup_elem),
  4254. BPF_MOV64_IMM(BPF_REG_0, 0),
  4255. BPF_EXIT_INSN(),
  4256. },
  4257. .fixup_map_hash_8b = { 6 },
  4258. .result = REJECT,
  4259. .errstr = "invalid access to packet",
  4260. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4261. },
  /*
   * Helper access to packet, tests 11-21 (SCHED_CLS): which helpers may take
   * a direct packet pointer, and with what length argument.
   * skb_store_bytes/skb_load_bytes must not receive pkt pointers at all;
   * csum_diff may, provided the (ptr, len) pair stays inside the range
   * proven against data_end and len is non-negative.
   */
  /* test11: pkt ptr passed to skb_store_bytes, which is not allowed to
   * access the packet directly -> REJECT.
   */
  4262. {
  4263. "helper access to packet: test11, cls unsuitable helper 1",
  4264. .insns = {
  4265. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4266. offsetof(struct __sk_buff, data)),
  4267. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4268. offsetof(struct __sk_buff, data_end)),
  4269. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  4270. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  4271. BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
  4272. BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
  4273. BPF_MOV64_IMM(BPF_REG_2, 0),
  4274. BPF_MOV64_IMM(BPF_REG_4, 42),
  4275. BPF_MOV64_IMM(BPF_REG_5, 0),
  4276. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4277. BPF_FUNC_skb_store_bytes),
  4278. BPF_MOV64_IMM(BPF_REG_0, 0),
  4279. BPF_EXIT_INSN(),
  4280. },
  4281. .result = REJECT,
  4282. .errstr = "helper access to the packet",
  4283. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4284. },
  /* test12: pkt ptr as the destination buffer of skb_load_bytes -> REJECT. */
  4285. {
  4286. "helper access to packet: test12, cls unsuitable helper 2",
  4287. .insns = {
  4288. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4289. offsetof(struct __sk_buff, data)),
  4290. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4291. offsetof(struct __sk_buff, data_end)),
  4292. BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
  4293. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
  4294. BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
  4295. BPF_MOV64_IMM(BPF_REG_2, 0),
  4296. BPF_MOV64_IMM(BPF_REG_4, 4),
  4297. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4298. BPF_FUNC_skb_load_bytes),
  4299. BPF_MOV64_IMM(BPF_REG_0, 0),
  4300. BPF_EXIT_INSN(),
  4301. },
  4302. .result = REJECT,
  4303. .errstr = "helper access to the packet",
  4304. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4305. },
  /* test13: csum_diff on (pkt+1, 4) inside an 8-byte proven range -> ACCEPT. */
  4306. {
  4307. "helper access to packet: test13, cls helper ok",
  4308. .insns = {
  4309. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4310. offsetof(struct __sk_buff, data)),
  4311. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4312. offsetof(struct __sk_buff, data_end)),
  4313. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  4314. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4315. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  4316. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  4317. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4318. BPF_MOV64_IMM(BPF_REG_2, 4),
  4319. BPF_MOV64_IMM(BPF_REG_3, 0),
  4320. BPF_MOV64_IMM(BPF_REG_4, 0),
  4321. BPF_MOV64_IMM(BPF_REG_5, 0),
  4322. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4323. BPF_FUNC_csum_diff),
  4324. BPF_MOV64_IMM(BPF_REG_0, 0),
  4325. BPF_EXIT_INSN(),
  4326. },
  4327. .result = ACCEPT,
  4328. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4329. },
  /* test14: subtracting 4 from the checked end pointer still leaves the
   * 4-byte csum_diff window inside the range -> ACCEPT.
   */
  4330. {
  4331. "helper access to packet: test14, cls helper ok sub",
  4332. .insns = {
  4333. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4334. offsetof(struct __sk_buff, data)),
  4335. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4336. offsetof(struct __sk_buff, data_end)),
  4337. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  4338. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4339. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  4340. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  4341. BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
  4342. BPF_MOV64_IMM(BPF_REG_2, 4),
  4343. BPF_MOV64_IMM(BPF_REG_3, 0),
  4344. BPF_MOV64_IMM(BPF_REG_4, 0),
  4345. BPF_MOV64_IMM(BPF_REG_5, 0),
  4346. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4347. BPF_FUNC_csum_diff),
  4348. BPF_MOV64_IMM(BPF_REG_0, 0),
  4349. BPF_EXIT_INSN(),
  4350. },
  4351. .result = ACCEPT,
  4352. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4353. },
  /* test15: SUB 12 moves the pointer before the packet start -> REJECT. */
  4354. {
  4355. "helper access to packet: test15, cls helper fail sub",
  4356. .insns = {
  4357. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4358. offsetof(struct __sk_buff, data)),
  4359. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4360. offsetof(struct __sk_buff, data_end)),
  4361. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  4362. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4363. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  4364. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  4365. BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
  4366. BPF_MOV64_IMM(BPF_REG_2, 4),
  4367. BPF_MOV64_IMM(BPF_REG_3, 0),
  4368. BPF_MOV64_IMM(BPF_REG_4, 0),
  4369. BPF_MOV64_IMM(BPF_REG_5, 0),
  4370. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4371. BPF_FUNC_csum_diff),
  4372. BPF_MOV64_IMM(BPF_REG_0, 0),
  4373. BPF_EXIT_INSN(),
  4374. },
  4375. .result = REJECT,
  4376. .errstr = "invalid access to packet",
  4377. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4378. },
  /* test16: len=8 exceeds the 7-byte range proven from pkt+1 -> REJECT. */
  4379. {
  4380. "helper access to packet: test16, cls helper fail range 1",
  4381. .insns = {
  4382. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4383. offsetof(struct __sk_buff, data)),
  4384. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4385. offsetof(struct __sk_buff, data_end)),
  4386. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  4387. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4388. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  4389. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  4390. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4391. BPF_MOV64_IMM(BPF_REG_2, 8),
  4392. BPF_MOV64_IMM(BPF_REG_3, 0),
  4393. BPF_MOV64_IMM(BPF_REG_4, 0),
  4394. BPF_MOV64_IMM(BPF_REG_5, 0),
  4395. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4396. BPF_FUNC_csum_diff),
  4397. BPF_MOV64_IMM(BPF_REG_0, 0),
  4398. BPF_EXIT_INSN(),
  4399. },
  4400. .result = REJECT,
  4401. .errstr = "invalid access to packet",
  4402. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4403. },
  /* test17: negative length (-9) for csum_diff -> REJECT on R2 bounds. */
  4404. {
  4405. "helper access to packet: test17, cls helper fail range 2",
  4406. .insns = {
  4407. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4408. offsetof(struct __sk_buff, data)),
  4409. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4410. offsetof(struct __sk_buff, data_end)),
  4411. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  4412. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4413. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  4414. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  4415. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4416. BPF_MOV64_IMM(BPF_REG_2, -9),
  4417. BPF_MOV64_IMM(BPF_REG_3, 0),
  4418. BPF_MOV64_IMM(BPF_REG_4, 0),
  4419. BPF_MOV64_IMM(BPF_REG_5, 0),
  4420. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4421. BPF_FUNC_csum_diff),
  4422. BPF_MOV64_IMM(BPF_REG_0, 0),
  4423. BPF_EXIT_INSN(),
  4424. },
  4425. .result = REJECT,
  4426. .errstr = "R2 min value is negative",
  4427. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4428. },
  /* test18: length ~0 (all ones, i.e. -1) -> same negative-length REJECT. */
  4429. {
  4430. "helper access to packet: test18, cls helper fail range 3",
  4431. .insns = {
  4432. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4433. offsetof(struct __sk_buff, data)),
  4434. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4435. offsetof(struct __sk_buff, data_end)),
  4436. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  4437. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4438. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  4439. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  4440. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4441. BPF_MOV64_IMM(BPF_REG_2, ~0),
  4442. BPF_MOV64_IMM(BPF_REG_3, 0),
  4443. BPF_MOV64_IMM(BPF_REG_4, 0),
  4444. BPF_MOV64_IMM(BPF_REG_5, 0),
  4445. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4446. BPF_FUNC_csum_diff),
  4447. BPF_MOV64_IMM(BPF_REG_0, 0),
  4448. BPF_EXIT_INSN(),
  4449. },
  4450. .result = REJECT,
  4451. .errstr = "R2 min value is negative",
  4452. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4453. },
  /* test19: zero-length pkt range is permitted for csum_diff -> ACCEPT. */
  4454. {
  4455. "helper access to packet: test19, cls helper range zero",
  4456. .insns = {
  4457. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4458. offsetof(struct __sk_buff, data)),
  4459. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4460. offsetof(struct __sk_buff, data_end)),
  4461. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  4462. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4463. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  4464. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  4465. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4466. BPF_MOV64_IMM(BPF_REG_2, 0),
  4467. BPF_MOV64_IMM(BPF_REG_3, 0),
  4468. BPF_MOV64_IMM(BPF_REG_4, 0),
  4469. BPF_MOV64_IMM(BPF_REG_5, 0),
  4470. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4471. BPF_FUNC_csum_diff),
  4472. BPF_MOV64_IMM(BPF_REG_0, 0),
  4473. BPF_EXIT_INSN(),
  4474. },
  4475. .result = ACCEPT,
  4476. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4477. },
  /* test20: pkt_end pointer (R7) itself passed as the buffer -> REJECT with
   * a type mismatch.
   */
  4478. {
  4479. "helper access to packet: test20, pkt end as input",
  4480. .insns = {
  4481. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4482. offsetof(struct __sk_buff, data)),
  4483. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4484. offsetof(struct __sk_buff, data_end)),
  4485. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  4486. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4487. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  4488. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  4489. BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
  4490. BPF_MOV64_IMM(BPF_REG_2, 4),
  4491. BPF_MOV64_IMM(BPF_REG_3, 0),
  4492. BPF_MOV64_IMM(BPF_REG_4, 0),
  4493. BPF_MOV64_IMM(BPF_REG_5, 0),
  4494. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4495. BPF_FUNC_csum_diff),
  4496. BPF_MOV64_IMM(BPF_REG_0, 0),
  4497. BPF_EXIT_INSN(),
  4498. },
  4499. .result = REJECT,
  4500. .errstr = "R1 type=pkt_end expected=fp",
  4501. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4502. },
  /* test21: R1 still holds the pkt+1+7 end-check pointer (not reset to the
   * checked base), so its remaining range cannot cover len=4 -> REJECT.
   */
  4503. {
  4504. "helper access to packet: test21, wrong reg",
  4505. .insns = {
  4506. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4507. offsetof(struct __sk_buff, data)),
  4508. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4509. offsetof(struct __sk_buff, data_end)),
  4510. BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
  4511. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  4512. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
  4513. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
  4514. BPF_MOV64_IMM(BPF_REG_2, 4),
  4515. BPF_MOV64_IMM(BPF_REG_3, 0),
  4516. BPF_MOV64_IMM(BPF_REG_4, 0),
  4517. BPF_MOV64_IMM(BPF_REG_5, 0),
  4518. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4519. BPF_FUNC_csum_diff),
  4520. BPF_MOV64_IMM(BPF_REG_0, 0),
  4521. BPF_EXIT_INSN(),
  4522. },
  4523. .result = REJECT,
  4524. .errstr = "invalid access to packet",
  4525. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  4526. },
  /*
   * "Prevent map lookup" tests: bpf_map_lookup_elem() must be refused for
   * map types that only the kernel may read (sockmap, sockhash, xskmap,
   * stack-trace map, prog array).  Each program is identical boilerplate —
   * zero an 8-byte stack key, point R2 at it, load the map fd (patched at
   * insn 3 via the .fixup_* array) and call lookup; only the map type and
   * therefore the expected "cannot pass map_type N" errstr differ.
   */
  /* BPF_MAP_TYPE_SOCKMAP (15): lookup forbidden from SOCK_OPS. */
  4527. {
  4528. "prevent map lookup in sockmap",
  4529. .insns = {
  4530. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4531. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4532. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4533. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4534. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4535. BPF_FUNC_map_lookup_elem),
  4536. BPF_EXIT_INSN(),
  4537. },
  4538. .fixup_map_sockmap = { 3 },
  4539. .result = REJECT,
  4540. .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
  4541. .prog_type = BPF_PROG_TYPE_SOCK_OPS,
  4542. },
  /* BPF_MAP_TYPE_SOCKHASH (18): lookup forbidden from SOCK_OPS. */
  4543. {
  4544. "prevent map lookup in sockhash",
  4545. .insns = {
  4546. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4547. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4548. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4549. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4550. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4551. BPF_FUNC_map_lookup_elem),
  4552. BPF_EXIT_INSN(),
  4553. },
  4554. .fixup_map_sockhash = { 3 },
  4555. .result = REJECT,
  4556. .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
  4557. .prog_type = BPF_PROG_TYPE_SOCK_OPS,
  4558. },
  /* BPF_MAP_TYPE_XSKMAP (17): lookup forbidden from XDP. */
  4559. {
  4560. "prevent map lookup in xskmap",
  4561. .insns = {
  4562. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4563. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4564. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4565. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4566. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4567. BPF_FUNC_map_lookup_elem),
  4568. BPF_EXIT_INSN(),
  4569. },
  4570. .fixup_map_xskmap = { 3 },
  4571. .result = REJECT,
  4572. .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
  4573. .prog_type = BPF_PROG_TYPE_XDP,
  4574. },
  /* BPF_MAP_TYPE_STACK_TRACE (7): lookup forbidden from PERF_EVENT. */
  4575. {
  4576. "prevent map lookup in stack trace",
  4577. .insns = {
  4578. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4579. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4580. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4581. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4582. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4583. BPF_FUNC_map_lookup_elem),
  4584. BPF_EXIT_INSN(),
  4585. },
  4586. .fixup_map_stacktrace = { 3 },
  4587. .result = REJECT,
  4588. .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
  4589. .prog_type = BPF_PROG_TYPE_PERF_EVENT,
  4590. },
  /* BPF_MAP_TYPE_PROG_ARRAY (3): lookup forbidden (default prog type). */
  4591. {
  4592. "prevent map lookup in prog array",
  4593. .insns = {
  4594. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4595. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4596. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4597. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4598. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4599. BPF_FUNC_map_lookup_elem),
  4600. BPF_EXIT_INSN(),
  4601. },
  4602. .fixup_prog2 = { 3 },
  4603. .result = REJECT,
  4604. .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
  4605. },
  /*
   * Map-value array indexing tests on a 48-byte hash map value
   * (fixup_map_hash_48b): constant, register and variable offsets into the
   * looked-up value.  Accesses are ACCEPTed only when the index is bounds-
   * checked (or constant and in range); unprivileged loads of the map-value
   * pointer into R0 are always rejected with "R0 leaks addr".
   */
  /* Constant in-range offset after a NULL check -> ACCEPT (priv). */
  4606. {
  4607. "valid map access into an array with a constant",
  4608. .insns = {
  4609. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4610. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4611. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4612. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4613. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4614. BPF_FUNC_map_lookup_elem),
  4615. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  4616. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  4617. offsetof(struct test_val, foo)),
  4618. BPF_EXIT_INSN(),
  4619. },
  4620. .fixup_map_hash_48b = { 3 },
  4621. .errstr_unpriv = "R0 leaks addr",
  4622. .result_unpriv = REJECT,
  4623. .result = ACCEPT,
  4624. },
  /* Known-constant register (4 << 2 = 16) added to the value ptr -> ACCEPT. */
  4625. {
  4626. "valid map access into an array with a register",
  4627. .insns = {
  4628. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4629. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4630. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4631. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4632. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4633. BPF_FUNC_map_lookup_elem),
  4634. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  4635. BPF_MOV64_IMM(BPF_REG_1, 4),
  4636. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  4637. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  4638. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  4639. offsetof(struct test_val, foo)),
  4640. BPF_EXIT_INSN(),
  4641. },
  4642. .fixup_map_hash_48b = { 3 },
  4643. .errstr_unpriv = "R0 leaks addr",
  4644. .result_unpriv = REJECT,
  4645. .result = ACCEPT,
  4646. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  4647. },
  /* Variable index loaded from the value, upper-bounded by JGE against
   * MAX_ENTRIES before use -> ACCEPT.
   */
  4648. {
  4649. "valid map access into an array with a variable",
  4650. .insns = {
  4651. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4652. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4653. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4654. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4655. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4656. BPF_FUNC_map_lookup_elem),
  4657. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  4658. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  4659. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
  4660. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  4661. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  4662. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  4663. offsetof(struct test_val, foo)),
  4664. BPF_EXIT_INSN(),
  4665. },
  4666. .fixup_map_hash_48b = { 3 },
  4667. .errstr_unpriv = "R0 leaks addr",
  4668. .result_unpriv = REJECT,
  4669. .result = ACCEPT,
  4670. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  4671. },
  /* Signed index clamped on both sides with JSGT comparisons before the
   * shift/add -> ACCEPT.
   */
  4672. {
  4673. "valid map access into an array with a signed variable",
  4674. .insns = {
  4675. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4676. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4677. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4678. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4679. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4680. BPF_FUNC_map_lookup_elem),
  4681. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
  4682. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  4683. BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
  4684. BPF_MOV32_IMM(BPF_REG_1, 0),
  4685. BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
  4686. BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
  4687. BPF_MOV32_IMM(BPF_REG_1, 0),
  4688. BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
  4689. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  4690. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  4691. offsetof(struct test_val, foo)),
  4692. BPF_EXIT_INSN(),
  4693. },
  4694. .fixup_map_hash_48b = { 3 },
  4695. .errstr_unpriv = "R0 leaks addr",
  4696. .result_unpriv = REJECT,
  4697. .result = ACCEPT,
  4698. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  4699. },
  /* Constant offset one past the end of the 48-byte value -> REJECT with
   * the exact off/size errstr.
   */
  4700. {
  4701. "invalid map access into an array with a constant",
  4702. .insns = {
  4703. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4704. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4705. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4706. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4707. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4708. BPF_FUNC_map_lookup_elem),
  4709. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  4710. BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
  4711. offsetof(struct test_val, foo)),
  4712. BPF_EXIT_INSN(),
  4713. },
  4714. .fixup_map_hash_48b = { 3 },
  4715. .errstr = "invalid access to map value, value_size=48 off=48 size=8",
  4716. .result = REJECT,
  4717. },
  /* Register holds MAX_ENTRIES + 1 (out of range) -> REJECT. */
  4718. {
  4719. "invalid map access into an array with a register",
  4720. .insns = {
  4721. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4722. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4723. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4724. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4725. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4726. BPF_FUNC_map_lookup_elem),
  4727. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  4728. BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
  4729. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  4730. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  4731. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  4732. offsetof(struct test_val, foo)),
  4733. BPF_EXIT_INSN(),
  4734. },
  4735. .fixup_map_hash_48b = { 3 },
  4736. .errstr = "R0 min value is outside of the array range",
  4737. .result = REJECT,
  4738. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  4739. },
  /* Variable index with no upper-bound check at all -> REJECT as unbounded. */
  4740. {
  4741. "invalid map access into an array with a variable",
  4742. .insns = {
  4743. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4744. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4745. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4746. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4747. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4748. BPF_FUNC_map_lookup_elem),
  4749. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  4750. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  4751. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  4752. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  4753. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  4754. offsetof(struct test_val, foo)),
  4755. BPF_EXIT_INSN(),
  4756. },
  4757. .fixup_map_hash_48b = { 3 },
  4758. .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
  4759. .result = REJECT,
  4760. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  4761. },
  4762. {
  4763. "invalid map access into an array with no floor check",
  4764. .insns = {
  4765. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4766. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4767. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4768. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4769. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4770. BPF_FUNC_map_lookup_elem),
  4771. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  4772. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
  4773. BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
  4774. BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
  4775. BPF_MOV32_IMM(BPF_REG_1, 0),
  4776. BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
  4777. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  4778. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  4779. offsetof(struct test_val, foo)),
  4780. BPF_EXIT_INSN(),
  4781. },
  4782. .fixup_map_hash_48b = { 3 },
  4783. .errstr_unpriv = "R0 leaks addr",
  4784. .errstr = "R0 unbounded memory access",
  4785. .result_unpriv = REJECT,
  4786. .result = REJECT,
  4787. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  4788. },
  4789. {
  4790. "invalid map access into an array with a invalid max check",
  4791. .insns = {
  4792. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4793. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4794. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4795. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4796. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4797. BPF_FUNC_map_lookup_elem),
  4798. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  4799. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  4800. BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
  4801. BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
  4802. BPF_MOV32_IMM(BPF_REG_1, 0),
  4803. BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
  4804. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  4805. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  4806. offsetof(struct test_val, foo)),
  4807. BPF_EXIT_INSN(),
  4808. },
  4809. .fixup_map_hash_48b = { 3 },
  4810. .errstr_unpriv = "R0 leaks addr",
  4811. .errstr = "invalid access to map value, value_size=48 off=44 size=8",
  4812. .result_unpriv = REJECT,
  4813. .result = REJECT,
  4814. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  4815. },
  4816. {
  4817. "invalid map access into an array with a invalid max check",
  4818. .insns = {
  4819. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4820. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4821. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4822. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4823. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4824. BPF_FUNC_map_lookup_elem),
  4825. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
  4826. BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
  4827. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  4828. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  4829. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  4830. BPF_LD_MAP_FD(BPF_REG_1, 0),
  4831. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  4832. BPF_FUNC_map_lookup_elem),
  4833. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  4834. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
  4835. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
  4836. offsetof(struct test_val, foo)),
  4837. BPF_EXIT_INSN(),
  4838. },
  4839. .fixup_map_hash_48b = { 3, 11 },
  4840. .errstr = "R0 pointer += pointer",
  4841. .result = REJECT,
  4842. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  4843. },
  /*
   * CGROUP_SKB context-access tests: which __sk_buff fields a
   * BPF_PROG_TYPE_CGROUP_SKB program may read or write.
   */
  /* Reads data/data_end/len/pkt_type/mark/queue_mapping/protocol/
   * vlan_present plus a bounds-checked 1-byte packet read; writes mark
   * back. Accepted privileged; unprivileged rejected on the ctx access
   * at off=76. */
  4844. {
  4845. "direct packet read test#1 for CGROUP_SKB",
  4846. .insns = {
  4847. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4848. offsetof(struct __sk_buff, data)),
  4849. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4850. offsetof(struct __sk_buff, data_end)),
  4851. BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
  4852. offsetof(struct __sk_buff, len)),
  4853. BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
  4854. offsetof(struct __sk_buff, pkt_type)),
  4855. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4856. offsetof(struct __sk_buff, mark)),
  4857. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
  4858. offsetof(struct __sk_buff, mark)),
  4859. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4860. offsetof(struct __sk_buff, queue_mapping)),
  4861. BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
  4862. offsetof(struct __sk_buff, protocol)),
  4863. BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
  4864. offsetof(struct __sk_buff, vlan_present)),
  4865. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  4866. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  4867. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  4868. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  4869. BPF_MOV64_IMM(BPF_REG_0, 0),
  4870. BPF_EXIT_INSN(),
  4871. },
  4872. .result = ACCEPT,
  4873. .result_unpriv = REJECT,
  4874. .errstr_unpriv = "invalid bpf_context access off=76 size=4",
  4875. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  4876. },
  /* Reads vlan_tci/vlan_proto/priority/ingress_ifindex/tc_index/hash and
   * writes priority back: all permitted. */
  4877. {
  4878. "direct packet read test#2 for CGROUP_SKB",
  4879. .insns = {
  4880. BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
  4881. offsetof(struct __sk_buff, vlan_tci)),
  4882. BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
  4883. offsetof(struct __sk_buff, vlan_proto)),
  4884. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4885. offsetof(struct __sk_buff, priority)),
  4886. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
  4887. offsetof(struct __sk_buff, priority)),
  4888. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4889. offsetof(struct __sk_buff,
  4890. ingress_ifindex)),
  4891. BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
  4892. offsetof(struct __sk_buff, tc_index)),
  4893. BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
  4894. offsetof(struct __sk_buff, hash)),
  4895. BPF_MOV64_IMM(BPF_REG_0, 0),
  4896. BPF_EXIT_INSN(),
  4897. },
  4898. .result = ACCEPT,
  4899. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  4900. },
  /* Reads cb[0..4] and napi_id, writes cb[0..4] back: accepted. */
  4901. {
  4902. "direct packet read test#3 for CGROUP_SKB",
  4903. .insns = {
  4904. BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
  4905. offsetof(struct __sk_buff, cb[0])),
  4906. BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
  4907. offsetof(struct __sk_buff, cb[1])),
  4908. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4909. offsetof(struct __sk_buff, cb[2])),
  4910. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4911. offsetof(struct __sk_buff, cb[3])),
  4912. BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
  4913. offsetof(struct __sk_buff, cb[4])),
  4914. BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
  4915. offsetof(struct __sk_buff, napi_id)),
  4916. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
  4917. offsetof(struct __sk_buff, cb[0])),
  4918. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
  4919. offsetof(struct __sk_buff, cb[1])),
  4920. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
  4921. offsetof(struct __sk_buff, cb[2])),
  4922. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
  4923. offsetof(struct __sk_buff, cb[3])),
  4924. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
  4925. offsetof(struct __sk_buff, cb[4])),
  4926. BPF_MOV64_IMM(BPF_REG_0, 0),
  4927. BPF_EXIT_INSN(),
  4928. },
  4929. .result = ACCEPT,
  4930. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  4931. },
  /* Reads the socket address fields (family, remote/local IPv4 and IPv6,
   * remote/local port): accepted. */
  4932. {
  4933. "direct packet read test#4 for CGROUP_SKB",
  4934. .insns = {
  4935. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  4936. offsetof(struct __sk_buff, family)),
  4937. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  4938. offsetof(struct __sk_buff, remote_ip4)),
  4939. BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
  4940. offsetof(struct __sk_buff, local_ip4)),
  4941. BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
  4942. offsetof(struct __sk_buff, remote_ip6[0])),
  4943. BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
  4944. offsetof(struct __sk_buff, remote_ip6[1])),
  4945. BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
  4946. offsetof(struct __sk_buff, remote_ip6[2])),
  4947. BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
  4948. offsetof(struct __sk_buff, remote_ip6[3])),
  4949. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4950. offsetof(struct __sk_buff, local_ip6[0])),
  4951. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4952. offsetof(struct __sk_buff, local_ip6[1])),
  4953. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4954. offsetof(struct __sk_buff, local_ip6[2])),
  4955. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  4956. offsetof(struct __sk_buff, local_ip6[3])),
  4957. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  4958. offsetof(struct __sk_buff, remote_port)),
  4959. BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
  4960. offsetof(struct __sk_buff, local_port)),
  4961. BPF_MOV64_IMM(BPF_REG_0, 0),
  4962. BPF_EXIT_INSN(),
  4963. },
  4964. .result = ACCEPT,
  4965. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  4966. },
  /* tc_classid is not accessible from CGROUP_SKB: rejected. */
  4967. {
  4968. "invalid access of tc_classid for CGROUP_SKB",
  4969. .insns = {
  4970. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  4971. offsetof(struct __sk_buff, tc_classid)),
  4972. BPF_MOV64_IMM(BPF_REG_0, 0),
  4973. BPF_EXIT_INSN(),
  4974. },
  4975. .result = REJECT,
  4976. .errstr = "invalid bpf_context access",
  4977. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  4978. },
  /* data_meta is not accessible from CGROUP_SKB: rejected. */
  4979. {
  4980. "invalid access of data_meta for CGROUP_SKB",
  4981. .insns = {
  4982. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  4983. offsetof(struct __sk_buff, data_meta)),
  4984. BPF_MOV64_IMM(BPF_REG_0, 0),
  4985. BPF_EXIT_INSN(),
  4986. },
  4987. .result = REJECT,
  4988. .errstr = "invalid bpf_context access",
  4989. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  4990. },
  /* flow_keys is not accessible from CGROUP_SKB: rejected. */
  4991. {
  4992. "invalid access of flow_keys for CGROUP_SKB",
  4993. .insns = {
  4994. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  4995. offsetof(struct __sk_buff, flow_keys)),
  4996. BPF_MOV64_IMM(BPF_REG_0, 0),
  4997. BPF_EXIT_INSN(),
  4998. },
  4999. .result = REJECT,
  5000. .errstr = "invalid bpf_context access",
  5001. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5002. },
  /* napi_id is readable (see test#3) but not writable: the store is
   * rejected. */
  5003. {
  5004. "invalid write access to napi_id for CGROUP_SKB",
  5005. .insns = {
  5006. BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
  5007. offsetof(struct __sk_buff, napi_id)),
  5008. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
  5009. offsetof(struct __sk_buff, napi_id)),
  5010. BPF_MOV64_IMM(BPF_REG_0, 0),
  5011. BPF_EXIT_INSN(),
  5012. },
  5013. .result = REJECT,
  5014. .errstr = "invalid bpf_context access",
  5015. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5016. },
  /*
   * bpf_get_local_storage() tests with a (shared) cgroup storage map
   * (fixup_cgroup_storage; errstr shows value_size=64).
   */
  /* Correct usage: zero flags, cgroup storage map, in-bounds read. */
  5017. {
  5018. "valid cgroup storage access",
  5019. .insns = {
  5020. BPF_MOV64_IMM(BPF_REG_2, 0),
  5021. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5022. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5023. BPF_FUNC_get_local_storage),
  5024. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  5025. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  5026. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
  5027. BPF_EXIT_INSN(),
  5028. },
  5029. .fixup_cgroup_storage = { 1 },
  5030. .result = ACCEPT,
  5031. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5032. },
  /* Wrong map type (plain hash map) passed to the helper: rejected. */
  5033. {
  5034. "invalid cgroup storage access 1",
  5035. .insns = {
  5036. BPF_MOV64_IMM(BPF_REG_2, 0),
  5037. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5038. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5039. BPF_FUNC_get_local_storage),
  5040. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  5041. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  5042. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
  5043. BPF_EXIT_INSN(),
  5044. },
  5045. .fixup_map_hash_8b = { 1 },
  5046. .result = REJECT,
  5047. .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
  5048. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5049. },
  /* fd 1 is never patched to a map (no fixup): rejected at load. */
  5050. {
  5051. "invalid cgroup storage access 2",
  5052. .insns = {
  5053. BPF_MOV64_IMM(BPF_REG_2, 0),
  5054. BPF_LD_MAP_FD(BPF_REG_1, 1),
  5055. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5056. BPF_FUNC_get_local_storage),
  5057. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
  5058. BPF_EXIT_INSN(),
  5059. },
  5060. .result = REJECT,
  5061. .errstr = "fd 1 is not pointing to valid bpf_map",
  5062. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5063. },
  /* Out-of-bounds read past the 64-byte value (off=256): rejected. */
  5064. {
  5065. "invalid cgroup storage access 3",
  5066. .insns = {
  5067. BPF_MOV64_IMM(BPF_REG_2, 0),
  5068. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5069. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5070. BPF_FUNC_get_local_storage),
  5071. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
  5072. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
  5073. BPF_MOV64_IMM(BPF_REG_0, 0),
  5074. BPF_EXIT_INSN(),
  5075. },
  5076. .fixup_cgroup_storage = { 1 },
  5077. .result = REJECT,
  5078. .errstr = "invalid access to map value, value_size=64 off=256 size=4",
  5079. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5080. },
  /* Negative offset (off=-2) before the start of the value: rejected. */
  5081. {
  5082. "invalid cgroup storage access 4",
  5083. .insns = {
  5084. BPF_MOV64_IMM(BPF_REG_2, 0),
  5085. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5086. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5087. BPF_FUNC_get_local_storage),
  5088. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
  5089. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  5090. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
  5091. BPF_EXIT_INSN(),
  5092. },
  5093. .fixup_cgroup_storage = { 1 },
  5094. .result = REJECT,
  5095. .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
  5096. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5097. },
  /* Non-zero constant flags (R2 = 7): rejected by the helper. */
  5098. {
  5099. "invalid cgroup storage access 5",
  5100. .insns = {
  5101. BPF_MOV64_IMM(BPF_REG_2, 7),
  5102. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5103. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5104. BPF_FUNC_get_local_storage),
  5105. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  5106. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  5107. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
  5108. BPF_EXIT_INSN(),
  5109. },
  5110. .fixup_cgroup_storage = { 1 },
  5111. .result = REJECT,
  5112. .errstr = "get_local_storage() doesn't support non-zero flags",
  5113. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5114. },
  /* Flags argument is the ctx pointer (not a known zero): rejected;
   * unprivileged additionally flags the pointer leak into the helper. */
  5115. {
  5116. "invalid cgroup storage access 6",
  5117. .insns = {
  5118. BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
  5119. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5120. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5121. BPF_FUNC_get_local_storage),
  5122. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  5123. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  5124. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
  5125. BPF_EXIT_INSN(),
  5126. },
  5127. .fixup_cgroup_storage = { 1 },
  5128. .result = REJECT,
  5129. .errstr = "get_local_storage() doesn't support non-zero flags",
  5130. .errstr_unpriv = "R2 leaks addr into helper function",
  5131. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5132. },
  /*
   * Same bpf_get_local_storage() checks as above, but against a per-cpu
   * cgroup storage map (fixup_percpu_cgroup_storage).
   */
  /* Correct usage: zero flags, per-cpu storage map, in-bounds read. */
  5133. {
  5134. "valid per-cpu cgroup storage access",
  5135. .insns = {
  5136. BPF_MOV64_IMM(BPF_REG_2, 0),
  5137. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5138. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5139. BPF_FUNC_get_local_storage),
  5140. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  5141. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  5142. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
  5143. BPF_EXIT_INSN(),
  5144. },
  5145. .fixup_percpu_cgroup_storage = { 1 },
  5146. .result = ACCEPT,
  5147. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5148. },
  /* Wrong map type (plain hash map): rejected. */
  5149. {
  5150. "invalid per-cpu cgroup storage access 1",
  5151. .insns = {
  5152. BPF_MOV64_IMM(BPF_REG_2, 0),
  5153. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5154. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5155. BPF_FUNC_get_local_storage),
  5156. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  5157. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  5158. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
  5159. BPF_EXIT_INSN(),
  5160. },
  5161. .fixup_map_hash_8b = { 1 },
  5162. .result = REJECT,
  5163. .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
  5164. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5165. },
  /* fd 1 is never patched to a map (no fixup): rejected at load. */
  5166. {
  5167. "invalid per-cpu cgroup storage access 2",
  5168. .insns = {
  5169. BPF_MOV64_IMM(BPF_REG_2, 0),
  5170. BPF_LD_MAP_FD(BPF_REG_1, 1),
  5171. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5172. BPF_FUNC_get_local_storage),
  5173. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
  5174. BPF_EXIT_INSN(),
  5175. },
  5176. .result = REJECT,
  5177. .errstr = "fd 1 is not pointing to valid bpf_map",
  5178. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5179. },
  /* Out-of-bounds read past the 64-byte value (off=256): rejected. */
  5180. {
  5181. "invalid per-cpu cgroup storage access 3",
  5182. .insns = {
  5183. BPF_MOV64_IMM(BPF_REG_2, 0),
  5184. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5185. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5186. BPF_FUNC_get_local_storage),
  5187. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
  5188. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
  5189. BPF_MOV64_IMM(BPF_REG_0, 0),
  5190. BPF_EXIT_INSN(),
  5191. },
  5192. .fixup_percpu_cgroup_storage = { 1 },
  5193. .result = REJECT,
  5194. .errstr = "invalid access to map value, value_size=64 off=256 size=4",
  5195. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5196. },
  /* Negative offset (off=-2) before the start of the value: rejected.
   * Fix: use the per-cpu storage fixup here — every other test in this
   * per-cpu group uses .fixup_percpu_cgroup_storage, but this entry was
   * copy-pasted with .fixup_cgroup_storage and so exercised the regular
   * (shared) storage map instead of the per-cpu one its name claims.
   * Both maps have value_size=64, so the expected errstr is unchanged. */
  5197. {
  5198. "invalid per-cpu cgroup storage access 4",
  5199. .insns = {
  5200. BPF_MOV64_IMM(BPF_REG_2, 0),
  5201. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5202. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5203. BPF_FUNC_get_local_storage),
  5204. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
  5205. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  5206. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
  5207. BPF_EXIT_INSN(),
  5208. },
  5209. .fixup_percpu_cgroup_storage = { 1 },
  5210. .result = REJECT,
  5211. .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
  5212. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5213. },
  /* Non-zero constant flags (R2 = 7) with a per-cpu storage map:
   * rejected by the helper. */
  5214. {
  5215. "invalid per-cpu cgroup storage access 5",
  5216. .insns = {
  5217. BPF_MOV64_IMM(BPF_REG_2, 7),
  5218. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5219. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5220. BPF_FUNC_get_local_storage),
  5221. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  5222. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  5223. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
  5224. BPF_EXIT_INSN(),
  5225. },
  5226. .fixup_percpu_cgroup_storage = { 1 },
  5227. .result = REJECT,
  5228. .errstr = "get_local_storage() doesn't support non-zero flags",
  5229. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5230. },
  /* Flags argument is the ctx pointer (not a known zero): rejected;
   * unprivileged additionally flags the pointer leak into the helper. */
  5231. {
  5232. "invalid per-cpu cgroup storage access 6",
  5233. .insns = {
  5234. BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
  5235. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5236. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5237. BPF_FUNC_get_local_storage),
  5238. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  5239. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  5240. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
  5241. BPF_EXIT_INSN(),
  5242. },
  5243. .fixup_percpu_cgroup_storage = { 1 },
  5244. .result = REJECT,
  5245. .errstr = "get_local_storage() doesn't support non-zero flags",
  5246. .errstr_unpriv = "R2 leaks addr into helper function",
  5247. .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
  5248. },
  /*
   * Tracking of map_lookup_elem results across register copies: a NULL
   * check on one copy must (or must not) validate the other copies.
   */
  /* R4 is a copy of R0; the JEQ null check on R0 also validates R4, so
   * the store through R4 is accepted. */
  5249. {
  5250. "multiple registers share map_lookup_elem result",
  5251. .insns = {
  5252. BPF_MOV64_IMM(BPF_REG_1, 10),
  5253. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  5254. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5255. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5256. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5257. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5258. BPF_FUNC_map_lookup_elem),
  5259. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  5260. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  5261. BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
  5262. BPF_EXIT_INSN(),
  5263. },
  5264. .fixup_map_hash_8b = { 4 },
  5265. .result = ACCEPT,
  5266. .prog_type = BPF_PROG_TYPE_SCHED_CLS
  5267. },
  /* ADD on the copied map_value_or_null pointer before the null check
   * (even though -2 then +2 nets to zero): rejected. */
  5268. {
  5269. "alu ops on ptr_to_map_value_or_null, 1",
  5270. .insns = {
  5271. BPF_MOV64_IMM(BPF_REG_1, 10),
  5272. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  5273. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5274. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5275. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5276. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5277. BPF_FUNC_map_lookup_elem),
  5278. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  5279. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
  5280. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
  5281. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  5282. BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
  5283. BPF_EXIT_INSN(),
  5284. },
  5285. .fixup_map_hash_8b = { 4 },
  5286. .errstr = "R4 pointer arithmetic on map_value_or_null",
  5287. .result = REJECT,
  5288. .prog_type = BPF_PROG_TYPE_SCHED_CLS
  5289. },
  /* AND with -1 (a no-op value-wise) on the unchecked pointer: still
   * rejected as pointer arithmetic on map_value_or_null. */
  5290. {
  5291. "alu ops on ptr_to_map_value_or_null, 2",
  5292. .insns = {
  5293. BPF_MOV64_IMM(BPF_REG_1, 10),
  5294. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  5295. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5296. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5297. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5298. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5299. BPF_FUNC_map_lookup_elem),
  5300. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  5301. BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
  5302. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  5303. BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
  5304. BPF_EXIT_INSN(),
  5305. },
  5306. .fixup_map_hash_8b = { 4 },
  5307. .errstr = "R4 pointer arithmetic on map_value_or_null",
  5308. .result = REJECT,
  5309. .prog_type = BPF_PROG_TYPE_SCHED_CLS
  5310. },
  /* LSH on the unchecked pointer: rejected the same way. */
  5311. {
  5312. "alu ops on ptr_to_map_value_or_null, 3",
  5313. .insns = {
  5314. BPF_MOV64_IMM(BPF_REG_1, 10),
  5315. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  5316. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5317. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5318. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5319. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5320. BPF_FUNC_map_lookup_elem),
  5321. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  5322. BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
  5323. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  5324. BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
  5325. BPF_EXIT_INSN(),
  5326. },
  5327. .fixup_map_hash_8b = { 4 },
  5328. .errstr = "R4 pointer arithmetic on map_value_or_null",
  5329. .result = REJECT,
  5330. .prog_type = BPF_PROG_TYPE_SCHED_CLS
  5331. },
  /* R4 holds the FIRST lookup's result, but the null check after the
   * SECOND lookup only validates R0, so the store through R4 is
   * rejected. */
  5332. {
  5333. "invalid memory access with multiple map_lookup_elem calls",
  5334. .insns = {
  5335. BPF_MOV64_IMM(BPF_REG_1, 10),
  5336. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  5337. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5338. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5339. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5340. BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
  5341. BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
  5342. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5343. BPF_FUNC_map_lookup_elem),
  5344. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  5345. BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
  5346. BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
  5347. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5348. BPF_FUNC_map_lookup_elem),
  5349. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  5350. BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
  5351. BPF_EXIT_INSN(),
  5352. },
  5353. .fixup_map_hash_8b = { 4 },
  5354. .result = REJECT,
  5355. .errstr = "R4 !read_ok",
  5356. .prog_type = BPF_PROG_TYPE_SCHED_CLS
  5357. },
  /* Second lookup only happens in one branch, but both paths converge on
   * a null check of R0 before the store through R4: accepted. */
  5358. {
  5359. "valid indirect map_lookup_elem access with 2nd lookup in branch",
  5360. .insns = {
  5361. BPF_MOV64_IMM(BPF_REG_1, 10),
  5362. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
  5363. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5364. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5365. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5366. BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
  5367. BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
  5368. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5369. BPF_FUNC_map_lookup_elem),
  5370. BPF_MOV64_IMM(BPF_REG_2, 10),
  5371. BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
  5372. BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
  5373. BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
  5374. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5375. BPF_FUNC_map_lookup_elem),
  5376. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  5377. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  5378. BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
  5379. BPF_EXIT_INSN(),
  5380. },
  5381. .fixup_map_hash_8b = { 4 },
  5382. .result = ACCEPT,
  5383. .prog_type = BPF_PROG_TYPE_SCHED_CLS
  5384. },
  /* The JGE bound is taken on the "else" path and the index is then
   * incremented, so the access is not provably bounded: rejected. */
  5385. {
  5386. "invalid map access from else condition",
  5387. .insns = {
  5388. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  5389. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5390. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5391. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5392. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
  5393. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  5394. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  5395. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
  5396. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
  5397. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  5398. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  5399. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
  5400. BPF_EXIT_INSN(),
  5401. },
  5402. .fixup_map_hash_48b = { 3 },
  5403. .errstr = "R0 unbounded memory access",
  5404. .result = REJECT,
  5405. .errstr_unpriv = "R0 leaks addr",
  5406. .result_unpriv = REJECT,
  5407. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  5408. },
  /*
   * OR on known constants must keep them tracked as constants, and the
   * result must still be bounds-checked against the stack buffer passed
   * to bpf_probe_read (R1 = fp-48, so a 48-byte buffer).
   */
  /* 34 | 13 == 47, fits in the 48-byte stack buffer: accepted. */
  5409. {
  5410. "constant register |= constant should keep constant type",
  5411. .insns = {
  5412. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  5413. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
  5414. BPF_MOV64_IMM(BPF_REG_2, 34),
  5415. BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
  5416. BPF_MOV64_IMM(BPF_REG_3, 0),
  5417. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5418. BPF_EXIT_INSN(),
  5419. },
  5420. .result = ACCEPT,
  5421. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5422. },
  /* 34 | 24 == 58, exceeds the 48-byte buffer: rejected with
   * access_size=58. */
  5423. {
  5424. "constant register |= constant should not bypass stack boundary checks",
  5425. .insns = {
  5426. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  5427. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
  5428. BPF_MOV64_IMM(BPF_REG_2, 34),
  5429. BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
  5430. BPF_MOV64_IMM(BPF_REG_3, 0),
  5431. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5432. BPF_EXIT_INSN(),
  5433. },
  5434. .errstr = "invalid stack type R1 off=-48 access_size=58",
  5435. .result = REJECT,
  5436. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5437. },
  /* Same as above but OR-ing with a constant register: 34 | 13 == 47,
   * in bounds, accepted. */
  5438. {
  5439. "constant register |= constant register should keep constant type",
  5440. .insns = {
  5441. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  5442. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
  5443. BPF_MOV64_IMM(BPF_REG_2, 34),
  5444. BPF_MOV64_IMM(BPF_REG_4, 13),
  5445. BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
  5446. BPF_MOV64_IMM(BPF_REG_3, 0),
  5447. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5448. BPF_EXIT_INSN(),
  5449. },
  5450. .result = ACCEPT,
  5451. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5452. },
  /* Register-register OR yielding 58: still out of bounds, rejected. */
  5453. {
  5454. "constant register |= constant register should not bypass stack boundary checks",
  5455. .insns = {
  5456. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  5457. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
  5458. BPF_MOV64_IMM(BPF_REG_2, 34),
  5459. BPF_MOV64_IMM(BPF_REG_4, 24),
  5460. BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
  5461. BPF_MOV64_IMM(BPF_REG_3, 0),
  5462. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5463. BPF_EXIT_INSN(),
  5464. },
  5465. .errstr = "invalid stack type R1 off=-48 access_size=58",
  5466. .result = REJECT,
  5467. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5468. },
  /*
   * Direct packet access for LWT program types: bounds-checked reads are
   * allowed for all three; writes only for LWT_XMIT.
   */
  /* Bounds-checked 1-byte packet write from LWT_IN: rejected. */
  5469. {
  5470. "invalid direct packet write for LWT_IN",
  5471. .insns = {
  5472. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  5473. offsetof(struct __sk_buff, data)),
  5474. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  5475. offsetof(struct __sk_buff, data_end)),
  5476. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  5477. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  5478. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  5479. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  5480. BPF_MOV64_IMM(BPF_REG_0, 0),
  5481. BPF_EXIT_INSN(),
  5482. },
  5483. .errstr = "cannot write into packet",
  5484. .result = REJECT,
  5485. .prog_type = BPF_PROG_TYPE_LWT_IN,
  5486. },
  /* Same write from LWT_OUT: rejected. */
  5487. {
  5488. "invalid direct packet write for LWT_OUT",
  5489. .insns = {
  5490. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  5491. offsetof(struct __sk_buff, data)),
  5492. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  5493. offsetof(struct __sk_buff, data_end)),
  5494. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  5495. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  5496. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  5497. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  5498. BPF_MOV64_IMM(BPF_REG_0, 0),
  5499. BPF_EXIT_INSN(),
  5500. },
  5501. .errstr = "cannot write into packet",
  5502. .result = REJECT,
  5503. .prog_type = BPF_PROG_TYPE_LWT_OUT,
  5504. },
  /* Same write from LWT_XMIT: accepted. */
  5505. {
  5506. "direct packet write for LWT_XMIT",
  5507. .insns = {
  5508. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  5509. offsetof(struct __sk_buff, data)),
  5510. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  5511. offsetof(struct __sk_buff, data_end)),
  5512. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  5513. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  5514. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  5515. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  5516. BPF_MOV64_IMM(BPF_REG_0, 0),
  5517. BPF_EXIT_INSN(),
  5518. },
  5519. .result = ACCEPT,
  5520. .prog_type = BPF_PROG_TYPE_LWT_XMIT,
  5521. },
  /* Bounds-checked 1-byte packet read from LWT_IN: accepted. */
  5522. {
  5523. "direct packet read for LWT_IN",
  5524. .insns = {
  5525. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  5526. offsetof(struct __sk_buff, data)),
  5527. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  5528. offsetof(struct __sk_buff, data_end)),
  5529. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  5530. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  5531. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  5532. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  5533. BPF_MOV64_IMM(BPF_REG_0, 0),
  5534. BPF_EXIT_INSN(),
  5535. },
  5536. .result = ACCEPT,
  5537. .prog_type = BPF_PROG_TYPE_LWT_IN,
  5538. },
  /* Same read from LWT_OUT: accepted. */
  5539. {
  5540. "direct packet read for LWT_OUT",
  5541. .insns = {
  5542. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  5543. offsetof(struct __sk_buff, data)),
  5544. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  5545. offsetof(struct __sk_buff, data_end)),
  5546. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  5547. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  5548. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  5549. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  5550. BPF_MOV64_IMM(BPF_REG_0, 0),
  5551. BPF_EXIT_INSN(),
  5552. },
  5553. .result = ACCEPT,
  5554. .prog_type = BPF_PROG_TYPE_LWT_OUT,
  5555. },
  /* Same read from LWT_XMIT: accepted. */
  5556. {
  5557. "direct packet read for LWT_XMIT",
  5558. .insns = {
  5559. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  5560. offsetof(struct __sk_buff, data)),
  5561. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  5562. offsetof(struct __sk_buff, data_end)),
  5563. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  5564. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  5565. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  5566. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  5567. BPF_MOV64_IMM(BPF_REG_0, 0),
  5568. BPF_EXIT_INSN(),
  5569. },
  5570. .result = ACCEPT,
  5571. .prog_type = BPF_PROG_TYPE_LWT_XMIT,
  5572. },
  /* Two range checks (data+8 and data+6 vs data_end); the 2-byte read at
   * offset 6 is covered by the checks: accepted. */
  5573. {
  5574. "overlapping checks for direct packet access",
  5575. .insns = {
  5576. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  5577. offsetof(struct __sk_buff, data)),
  5578. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  5579. offsetof(struct __sk_buff, data_end)),
  5580. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  5581. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  5582. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
  5583. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  5584. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
  5585. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
  5586. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
  5587. BPF_MOV64_IMM(BPF_REG_0, 0),
  5588. BPF_EXIT_INSN(),
  5589. },
  5590. .result = ACCEPT,
  5591. .prog_type = BPF_PROG_TYPE_LWT_XMIT,
  5592. },
  /* Two consecutive bpf_skb_change_head() calls from LWT_XMIT are
   * accepted (split into two calls for s390, per the inline comment). */
  5593. {
  5594. "make headroom for LWT_XMIT",
  5595. .insns = {
  5596. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  5597. BPF_MOV64_IMM(BPF_REG_2, 34),
  5598. BPF_MOV64_IMM(BPF_REG_3, 0),
  5599. BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
  5600. /* split for s390 to succeed */
  5601. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  5602. BPF_MOV64_IMM(BPF_REG_2, 42),
  5603. BPF_MOV64_IMM(BPF_REG_3, 0),
  5604. BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
  5605. BPF_MOV64_IMM(BPF_REG_0, 0),
  5606. BPF_EXIT_INSN(),
  5607. },
  5608. .result = ACCEPT,
  5609. .prog_type = BPF_PROG_TYPE_LWT_XMIT,
  5610. },
  5611. {
  5612. "invalid access of tc_classid for LWT_IN",
  5613. .insns = {
  5614. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  5615. offsetof(struct __sk_buff, tc_classid)),
  5616. BPF_EXIT_INSN(),
  5617. },
  5618. .result = REJECT,
  5619. .errstr = "invalid bpf_context access",
  5620. },
  5621. {
  5622. "invalid access of tc_classid for LWT_OUT",
  5623. .insns = {
  5624. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  5625. offsetof(struct __sk_buff, tc_classid)),
  5626. BPF_EXIT_INSN(),
  5627. },
  5628. .result = REJECT,
  5629. .errstr = "invalid bpf_context access",
  5630. },
  5631. {
  5632. "invalid access of tc_classid for LWT_XMIT",
  5633. .insns = {
  5634. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  5635. offsetof(struct __sk_buff, tc_classid)),
  5636. BPF_EXIT_INSN(),
  5637. },
  5638. .result = REJECT,
  5639. .errstr = "invalid bpf_context access",
  5640. },
  5641. {
  5642. "leak pointer into ctx 1",
  5643. .insns = {
  5644. BPF_MOV64_IMM(BPF_REG_0, 0),
  5645. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
  5646. offsetof(struct __sk_buff, cb[0])),
  5647. BPF_LD_MAP_FD(BPF_REG_2, 0),
  5648. BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
  5649. offsetof(struct __sk_buff, cb[0])),
  5650. BPF_EXIT_INSN(),
  5651. },
  5652. .fixup_map_hash_8b = { 2 },
  5653. .errstr_unpriv = "R2 leaks addr into mem",
  5654. .result_unpriv = REJECT,
  5655. .result = REJECT,
  5656. .errstr = "BPF_XADD stores into R1 ctx is not allowed",
  5657. },
  5658. {
  5659. "leak pointer into ctx 2",
  5660. .insns = {
  5661. BPF_MOV64_IMM(BPF_REG_0, 0),
  5662. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
  5663. offsetof(struct __sk_buff, cb[0])),
  5664. BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
  5665. offsetof(struct __sk_buff, cb[0])),
  5666. BPF_EXIT_INSN(),
  5667. },
  5668. .errstr_unpriv = "R10 leaks addr into mem",
  5669. .result_unpriv = REJECT,
  5670. .result = REJECT,
  5671. .errstr = "BPF_XADD stores into R1 ctx is not allowed",
  5672. },
  5673. {
  5674. "leak pointer into ctx 3",
  5675. .insns = {
  5676. BPF_MOV64_IMM(BPF_REG_0, 0),
  5677. BPF_LD_MAP_FD(BPF_REG_2, 0),
  5678. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
  5679. offsetof(struct __sk_buff, cb[0])),
  5680. BPF_EXIT_INSN(),
  5681. },
  5682. .fixup_map_hash_8b = { 1 },
  5683. .errstr_unpriv = "R2 leaks addr into ctx",
  5684. .result_unpriv = REJECT,
  5685. .result = ACCEPT,
  5686. },
  5687. {
  5688. "leak pointer into map val",
  5689. .insns = {
  5690. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  5691. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  5692. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5693. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5694. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5695. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5696. BPF_FUNC_map_lookup_elem),
  5697. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  5698. BPF_MOV64_IMM(BPF_REG_3, 0),
  5699. BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
  5700. BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  5701. BPF_MOV64_IMM(BPF_REG_0, 0),
  5702. BPF_EXIT_INSN(),
  5703. },
  5704. .fixup_map_hash_8b = { 4 },
  5705. .errstr_unpriv = "R6 leaks addr into mem",
  5706. .result_unpriv = REJECT,
  5707. .result = ACCEPT,
  5708. },
  5709. {
  5710. "helper access to map: full range",
  5711. .insns = {
  5712. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5713. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5714. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5715. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5716. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5717. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  5718. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5719. BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
  5720. BPF_MOV64_IMM(BPF_REG_3, 0),
  5721. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5722. BPF_EXIT_INSN(),
  5723. },
  5724. .fixup_map_hash_48b = { 3 },
  5725. .result = ACCEPT,
  5726. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5727. },
  5728. {
  5729. "helper access to map: partial range",
  5730. .insns = {
  5731. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5732. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5733. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5734. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5735. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5736. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  5737. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5738. BPF_MOV64_IMM(BPF_REG_2, 8),
  5739. BPF_MOV64_IMM(BPF_REG_3, 0),
  5740. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5741. BPF_EXIT_INSN(),
  5742. },
  5743. .fixup_map_hash_48b = { 3 },
  5744. .result = ACCEPT,
  5745. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5746. },
  5747. {
  5748. "helper access to map: empty range",
  5749. .insns = {
  5750. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5751. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5752. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5753. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5754. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5755. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  5756. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5757. BPF_MOV64_IMM(BPF_REG_2, 0),
  5758. BPF_EMIT_CALL(BPF_FUNC_trace_printk),
  5759. BPF_EXIT_INSN(),
  5760. },
  5761. .fixup_map_hash_48b = { 3 },
  5762. .errstr = "invalid access to map value, value_size=48 off=0 size=0",
  5763. .result = REJECT,
  5764. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5765. },
  5766. {
  5767. "helper access to map: out-of-bound range",
  5768. .insns = {
  5769. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5770. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5771. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5772. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5773. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5774. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  5775. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5776. BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
  5777. BPF_MOV64_IMM(BPF_REG_3, 0),
  5778. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5779. BPF_EXIT_INSN(),
  5780. },
  5781. .fixup_map_hash_48b = { 3 },
  5782. .errstr = "invalid access to map value, value_size=48 off=0 size=56",
  5783. .result = REJECT,
  5784. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5785. },
  5786. {
  5787. "helper access to map: negative range",
  5788. .insns = {
  5789. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5790. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5791. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5792. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5793. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5794. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  5795. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5796. BPF_MOV64_IMM(BPF_REG_2, -8),
  5797. BPF_MOV64_IMM(BPF_REG_3, 0),
  5798. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5799. BPF_EXIT_INSN(),
  5800. },
  5801. .fixup_map_hash_48b = { 3 },
  5802. .errstr = "R2 min value is negative",
  5803. .result = REJECT,
  5804. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5805. },
  5806. {
  5807. "helper access to adjusted map (via const imm): full range",
  5808. .insns = {
  5809. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5810. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5811. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5812. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5813. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5814. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  5815. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5816. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
  5817. offsetof(struct test_val, foo)),
  5818. BPF_MOV64_IMM(BPF_REG_2,
  5819. sizeof(struct test_val) -
  5820. offsetof(struct test_val, foo)),
  5821. BPF_MOV64_IMM(BPF_REG_3, 0),
  5822. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5823. BPF_EXIT_INSN(),
  5824. },
  5825. .fixup_map_hash_48b = { 3 },
  5826. .result = ACCEPT,
  5827. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5828. },
  5829. {
  5830. "helper access to adjusted map (via const imm): partial range",
  5831. .insns = {
  5832. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5833. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5834. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5835. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5836. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5837. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  5838. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5839. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
  5840. offsetof(struct test_val, foo)),
  5841. BPF_MOV64_IMM(BPF_REG_2, 8),
  5842. BPF_MOV64_IMM(BPF_REG_3, 0),
  5843. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5844. BPF_EXIT_INSN(),
  5845. },
  5846. .fixup_map_hash_48b = { 3 },
  5847. .result = ACCEPT,
  5848. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5849. },
  5850. {
  5851. "helper access to adjusted map (via const imm): empty range",
  5852. .insns = {
  5853. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5854. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5855. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5856. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5857. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5858. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  5859. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5860. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
  5861. offsetof(struct test_val, foo)),
  5862. BPF_MOV64_IMM(BPF_REG_2, 0),
  5863. BPF_EMIT_CALL(BPF_FUNC_trace_printk),
  5864. BPF_EXIT_INSN(),
  5865. },
  5866. .fixup_map_hash_48b = { 3 },
  5867. .errstr = "invalid access to map value, value_size=48 off=4 size=0",
  5868. .result = REJECT,
  5869. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5870. },
  5871. {
  5872. "helper access to adjusted map (via const imm): out-of-bound range",
  5873. .insns = {
  5874. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5875. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5876. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5877. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5878. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5879. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  5880. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5881. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
  5882. offsetof(struct test_val, foo)),
  5883. BPF_MOV64_IMM(BPF_REG_2,
  5884. sizeof(struct test_val) -
  5885. offsetof(struct test_val, foo) + 8),
  5886. BPF_MOV64_IMM(BPF_REG_3, 0),
  5887. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5888. BPF_EXIT_INSN(),
  5889. },
  5890. .fixup_map_hash_48b = { 3 },
  5891. .errstr = "invalid access to map value, value_size=48 off=4 size=52",
  5892. .result = REJECT,
  5893. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5894. },
  5895. {
  5896. "helper access to adjusted map (via const imm): negative range (> adjustment)",
  5897. .insns = {
  5898. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5899. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5900. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5901. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5902. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5903. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  5904. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5905. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
  5906. offsetof(struct test_val, foo)),
  5907. BPF_MOV64_IMM(BPF_REG_2, -8),
  5908. BPF_MOV64_IMM(BPF_REG_3, 0),
  5909. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5910. BPF_EXIT_INSN(),
  5911. },
  5912. .fixup_map_hash_48b = { 3 },
  5913. .errstr = "R2 min value is negative",
  5914. .result = REJECT,
  5915. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5916. },
  5917. {
  5918. "helper access to adjusted map (via const imm): negative range (< adjustment)",
  5919. .insns = {
  5920. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5921. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5922. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5923. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5924. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5925. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  5926. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5927. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
  5928. offsetof(struct test_val, foo)),
  5929. BPF_MOV64_IMM(BPF_REG_2, -1),
  5930. BPF_MOV64_IMM(BPF_REG_3, 0),
  5931. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5932. BPF_EXIT_INSN(),
  5933. },
  5934. .fixup_map_hash_48b = { 3 },
  5935. .errstr = "R2 min value is negative",
  5936. .result = REJECT,
  5937. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5938. },
  5939. {
  5940. "helper access to adjusted map (via const reg): full range",
  5941. .insns = {
  5942. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5943. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5944. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5945. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5946. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5947. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  5948. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5949. BPF_MOV64_IMM(BPF_REG_3,
  5950. offsetof(struct test_val, foo)),
  5951. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  5952. BPF_MOV64_IMM(BPF_REG_2,
  5953. sizeof(struct test_val) -
  5954. offsetof(struct test_val, foo)),
  5955. BPF_MOV64_IMM(BPF_REG_3, 0),
  5956. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5957. BPF_EXIT_INSN(),
  5958. },
  5959. .fixup_map_hash_48b = { 3 },
  5960. .result = ACCEPT,
  5961. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5962. },
  5963. {
  5964. "helper access to adjusted map (via const reg): partial range",
  5965. .insns = {
  5966. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5967. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5968. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5969. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5970. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5971. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  5972. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5973. BPF_MOV64_IMM(BPF_REG_3,
  5974. offsetof(struct test_val, foo)),
  5975. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  5976. BPF_MOV64_IMM(BPF_REG_2, 8),
  5977. BPF_MOV64_IMM(BPF_REG_3, 0),
  5978. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  5979. BPF_EXIT_INSN(),
  5980. },
  5981. .fixup_map_hash_48b = { 3 },
  5982. .result = ACCEPT,
  5983. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  5984. },
  5985. {
  5986. "helper access to adjusted map (via const reg): empty range",
  5987. .insns = {
  5988. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5989. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5990. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  5991. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5992. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  5993. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  5994. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  5995. BPF_MOV64_IMM(BPF_REG_3, 0),
  5996. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  5997. BPF_MOV64_IMM(BPF_REG_2, 0),
  5998. BPF_EMIT_CALL(BPF_FUNC_trace_printk),
  5999. BPF_EXIT_INSN(),
  6000. },
  6001. .fixup_map_hash_48b = { 3 },
  6002. .errstr = "R1 min value is outside of the array range",
  6003. .result = REJECT,
  6004. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6005. },
  6006. {
  6007. "helper access to adjusted map (via const reg): out-of-bound range",
  6008. .insns = {
  6009. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6010. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6011. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6012. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6013. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6014. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6015. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6016. BPF_MOV64_IMM(BPF_REG_3,
  6017. offsetof(struct test_val, foo)),
  6018. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6019. BPF_MOV64_IMM(BPF_REG_2,
  6020. sizeof(struct test_val) -
  6021. offsetof(struct test_val, foo) + 8),
  6022. BPF_MOV64_IMM(BPF_REG_3, 0),
  6023. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  6024. BPF_EXIT_INSN(),
  6025. },
  6026. .fixup_map_hash_48b = { 3 },
  6027. .errstr = "invalid access to map value, value_size=48 off=4 size=52",
  6028. .result = REJECT,
  6029. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6030. },
  6031. {
  6032. "helper access to adjusted map (via const reg): negative range (> adjustment)",
  6033. .insns = {
  6034. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6035. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6036. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6037. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6038. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6039. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6040. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6041. BPF_MOV64_IMM(BPF_REG_3,
  6042. offsetof(struct test_val, foo)),
  6043. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6044. BPF_MOV64_IMM(BPF_REG_2, -8),
  6045. BPF_MOV64_IMM(BPF_REG_3, 0),
  6046. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  6047. BPF_EXIT_INSN(),
  6048. },
  6049. .fixup_map_hash_48b = { 3 },
  6050. .errstr = "R2 min value is negative",
  6051. .result = REJECT,
  6052. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6053. },
  6054. {
  6055. "helper access to adjusted map (via const reg): negative range (< adjustment)",
  6056. .insns = {
  6057. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6058. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6059. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6060. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6061. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6062. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6063. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6064. BPF_MOV64_IMM(BPF_REG_3,
  6065. offsetof(struct test_val, foo)),
  6066. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6067. BPF_MOV64_IMM(BPF_REG_2, -1),
  6068. BPF_MOV64_IMM(BPF_REG_3, 0),
  6069. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  6070. BPF_EXIT_INSN(),
  6071. },
  6072. .fixup_map_hash_48b = { 3 },
  6073. .errstr = "R2 min value is negative",
  6074. .result = REJECT,
  6075. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6076. },
  6077. {
  6078. "helper access to adjusted map (via variable): full range",
  6079. .insns = {
  6080. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6081. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6082. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6083. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6084. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6085. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  6086. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6087. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6088. BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
  6089. offsetof(struct test_val, foo), 4),
  6090. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6091. BPF_MOV64_IMM(BPF_REG_2,
  6092. sizeof(struct test_val) -
  6093. offsetof(struct test_val, foo)),
  6094. BPF_MOV64_IMM(BPF_REG_3, 0),
  6095. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  6096. BPF_EXIT_INSN(),
  6097. },
  6098. .fixup_map_hash_48b = { 3 },
  6099. .result = ACCEPT,
  6100. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6101. },
  6102. {
  6103. "helper access to adjusted map (via variable): partial range",
  6104. .insns = {
  6105. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6106. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6107. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6108. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6109. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6110. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  6111. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6112. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6113. BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
  6114. offsetof(struct test_val, foo), 4),
  6115. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6116. BPF_MOV64_IMM(BPF_REG_2, 8),
  6117. BPF_MOV64_IMM(BPF_REG_3, 0),
  6118. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  6119. BPF_EXIT_INSN(),
  6120. },
  6121. .fixup_map_hash_48b = { 3 },
  6122. .result = ACCEPT,
  6123. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6124. },
  6125. {
  6126. "helper access to adjusted map (via variable): empty range",
  6127. .insns = {
  6128. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6129. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6130. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6131. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6132. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6133. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6134. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6135. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6136. BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
  6137. offsetof(struct test_val, foo), 3),
  6138. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6139. BPF_MOV64_IMM(BPF_REG_2, 0),
  6140. BPF_EMIT_CALL(BPF_FUNC_trace_printk),
  6141. BPF_EXIT_INSN(),
  6142. },
  6143. .fixup_map_hash_48b = { 3 },
  6144. .errstr = "R1 min value is outside of the array range",
  6145. .result = REJECT,
  6146. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6147. },
  6148. {
  6149. "helper access to adjusted map (via variable): no max check",
  6150. .insns = {
  6151. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6152. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6153. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6154. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6155. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6156. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6157. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6158. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6159. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6160. BPF_MOV64_IMM(BPF_REG_2, 1),
  6161. BPF_MOV64_IMM(BPF_REG_3, 0),
  6162. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  6163. BPF_EXIT_INSN(),
  6164. },
  6165. .fixup_map_hash_48b = { 3 },
  6166. .errstr = "R1 unbounded memory access",
  6167. .result = REJECT,
  6168. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6169. },
  6170. {
  6171. "helper access to adjusted map (via variable): wrong max check",
  6172. .insns = {
  6173. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6174. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6175. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6176. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6177. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6178. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  6179. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6180. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6181. BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
  6182. offsetof(struct test_val, foo), 4),
  6183. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6184. BPF_MOV64_IMM(BPF_REG_2,
  6185. sizeof(struct test_val) -
  6186. offsetof(struct test_val, foo) + 1),
  6187. BPF_MOV64_IMM(BPF_REG_3, 0),
  6188. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  6189. BPF_EXIT_INSN(),
  6190. },
  6191. .fixup_map_hash_48b = { 3 },
  6192. .errstr = "invalid access to map value, value_size=48 off=4 size=45",
  6193. .result = REJECT,
  6194. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6195. },
  6196. {
  6197. "helper access to map: bounds check using <, good access",
  6198. .insns = {
  6199. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6200. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6201. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6202. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6203. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6204. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6205. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6206. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6207. BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
  6208. BPF_MOV64_IMM(BPF_REG_0, 0),
  6209. BPF_EXIT_INSN(),
  6210. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6211. BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
  6212. BPF_MOV64_IMM(BPF_REG_0, 0),
  6213. BPF_EXIT_INSN(),
  6214. },
  6215. .fixup_map_hash_48b = { 3 },
  6216. .result = ACCEPT,
  6217. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6218. },
  6219. {
  6220. "helper access to map: bounds check using <, bad access",
  6221. .insns = {
  6222. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6223. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6224. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6225. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6226. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6227. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6228. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6229. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6230. BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
  6231. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6232. BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
  6233. BPF_MOV64_IMM(BPF_REG_0, 0),
  6234. BPF_EXIT_INSN(),
  6235. BPF_MOV64_IMM(BPF_REG_0, 0),
  6236. BPF_EXIT_INSN(),
  6237. },
  6238. .fixup_map_hash_48b = { 3 },
  6239. .result = REJECT,
  6240. .errstr = "R1 unbounded memory access",
  6241. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6242. },
  6243. {
  6244. "helper access to map: bounds check using <=, good access",
  6245. .insns = {
  6246. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6247. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6248. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6249. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6250. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6251. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6252. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6253. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6254. BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
  6255. BPF_MOV64_IMM(BPF_REG_0, 0),
  6256. BPF_EXIT_INSN(),
  6257. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6258. BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
  6259. BPF_MOV64_IMM(BPF_REG_0, 0),
  6260. BPF_EXIT_INSN(),
  6261. },
  6262. .fixup_map_hash_48b = { 3 },
  6263. .result = ACCEPT,
  6264. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6265. },
  6266. {
  6267. "helper access to map: bounds check using <=, bad access",
  6268. .insns = {
  6269. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6270. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6271. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6272. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6273. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6274. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6275. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6276. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6277. BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
  6278. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6279. BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
  6280. BPF_MOV64_IMM(BPF_REG_0, 0),
  6281. BPF_EXIT_INSN(),
  6282. BPF_MOV64_IMM(BPF_REG_0, 0),
  6283. BPF_EXIT_INSN(),
  6284. },
  6285. .fixup_map_hash_48b = { 3 },
  6286. .result = REJECT,
  6287. .errstr = "R1 unbounded memory access",
  6288. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6289. },
  6290. {
  6291. "helper access to map: bounds check using s<, good access",
  6292. .insns = {
  6293. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6294. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6295. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6296. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6297. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6298. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6299. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6300. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6301. BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
  6302. BPF_MOV64_IMM(BPF_REG_0, 0),
  6303. BPF_EXIT_INSN(),
  6304. BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
  6305. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6306. BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
  6307. BPF_MOV64_IMM(BPF_REG_0, 0),
  6308. BPF_EXIT_INSN(),
  6309. },
  6310. .fixup_map_hash_48b = { 3 },
  6311. .result = ACCEPT,
  6312. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6313. },
  6314. {
  6315. "helper access to map: bounds check using s<, good access 2",
  6316. .insns = {
  6317. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6318. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6319. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6320. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6321. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6322. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6323. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6324. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6325. BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
  6326. BPF_MOV64_IMM(BPF_REG_0, 0),
  6327. BPF_EXIT_INSN(),
  6328. BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
  6329. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6330. BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
  6331. BPF_MOV64_IMM(BPF_REG_0, 0),
  6332. BPF_EXIT_INSN(),
  6333. },
  6334. .fixup_map_hash_48b = { 3 },
  6335. .result = ACCEPT,
  6336. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6337. },
  6338. {
  6339. "helper access to map: bounds check using s<, bad access",
  6340. .insns = {
  6341. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6342. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6343. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6344. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6345. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6346. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6347. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6348. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
  6349. BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
  6350. BPF_MOV64_IMM(BPF_REG_0, 0),
  6351. BPF_EXIT_INSN(),
  6352. BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
  6353. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6354. BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
  6355. BPF_MOV64_IMM(BPF_REG_0, 0),
  6356. BPF_EXIT_INSN(),
  6357. },
  6358. .fixup_map_hash_48b = { 3 },
  6359. .result = REJECT,
  6360. .errstr = "R1 min value is negative",
  6361. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6362. },
  6363. {
  6364. "helper access to map: bounds check using s<=, good access",
  6365. .insns = {
  6366. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6367. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6368. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6369. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6370. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6371. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6372. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6373. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6374. BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
  6375. BPF_MOV64_IMM(BPF_REG_0, 0),
  6376. BPF_EXIT_INSN(),
  6377. BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
  6378. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6379. BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
  6380. BPF_MOV64_IMM(BPF_REG_0, 0),
  6381. BPF_EXIT_INSN(),
  6382. },
  6383. .fixup_map_hash_48b = { 3 },
  6384. .result = ACCEPT,
  6385. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6386. },
  /* ACCEPT: the loaded 32-bit value is bounded on both sides via signed
   * s<= (JSLE) comparisons before being added to the map value pointer,
   * so the byte store is provably within the 48-byte value.
   */
  6387. {
  6388. "helper access to map: bounds check using s<=, good access 2",
  6389. .insns = {
  6390. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6391. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6392. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6393. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6394. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6395. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6396. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6397. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6398. BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
  6399. BPF_MOV64_IMM(BPF_REG_0, 0),
  6400. BPF_EXIT_INSN(),
  6401. BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
  6402. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6403. BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
  6404. BPF_MOV64_IMM(BPF_REG_0, 0),
  6405. BPF_EXIT_INSN(),
  6406. },
  6407. .fixup_map_hash_48b = { 3 },
  6408. .result = ACCEPT,
  6409. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6410. },
  /* REJECT: identical check structure, but the value is loaded as 64-bit
   * (BPF_DW instead of BPF_W), so the verifier cannot derive a safe lower
   * bound — it reports "R1 min value is negative".
   */
  6411. {
  6412. "helper access to map: bounds check using s<=, bad access",
  6413. .insns = {
  6414. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6415. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6416. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6417. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6418. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6419. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6420. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  6421. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
  6422. BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
  6423. BPF_MOV64_IMM(BPF_REG_0, 0),
  6424. BPF_EXIT_INSN(),
  6425. BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
  6426. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  6427. BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
  6428. BPF_MOV64_IMM(BPF_REG_0, 0),
  6429. BPF_EXIT_INSN(),
  6430. },
  6431. .fixup_map_hash_48b = { 3 },
  6432. .result = REJECT,
  6433. .errstr = "R1 min value is negative",
  6434. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6435. },
  /* ACCEPT: scalar += map-value pointer. A known-constant scalar (4) added
   * to the value pointer keeps the resulting pointer in bounds of the
   * 48-byte array value; the byte load succeeds and the test returns 1.
   */
  6436. {
  6437. "map access: known scalar += value_ptr",
  6438. .insns = {
  6439. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6440. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6441. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6442. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6443. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6444. BPF_FUNC_map_lookup_elem),
  6445. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  6446. BPF_MOV64_IMM(BPF_REG_1, 4),
  6447. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
  6448. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
  6449. BPF_MOV64_IMM(BPF_REG_0, 1),
  6450. BPF_EXIT_INSN(),
  6451. },
  6452. .fixup_map_array_48b = { 3 },
  6453. .result = ACCEPT,
  6454. .retval = 1,
  6455. },
  /* ACCEPT: same as above with operands swapped — value pointer += known
   * constant scalar.
   */
  6456. {
  6457. "map access: value_ptr += known scalar",
  6458. .insns = {
  6459. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6460. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6461. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6462. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6463. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6464. BPF_FUNC_map_lookup_elem),
  6465. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  6466. BPF_MOV64_IMM(BPF_REG_1, 4),
  6467. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  6468. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6469. BPF_MOV64_IMM(BPF_REG_0, 1),
  6470. BPF_EXIT_INSN(),
  6471. },
  6472. .fixup_map_array_48b = { 3 },
  6473. .result = ACCEPT,
  6474. .retval = 1,
  6475. },
  /* ACCEPT: an unknown scalar read from the map, masked with AND 0xf so it
   * is bounded [0, 15], added to the value pointer.
   */
  6476. {
  6477. "map access: unknown scalar += value_ptr",
  6478. .insns = {
  6479. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6480. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6481. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6482. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6483. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6484. BPF_FUNC_map_lookup_elem),
  6485. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6486. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6487. BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
  6488. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
  6489. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
  6490. BPF_MOV64_IMM(BPF_REG_0, 1),
  6491. BPF_EXIT_INSN(),
  6492. },
  6493. .fixup_map_array_48b = { 3 },
  6494. .result = ACCEPT,
  6495. .retval = 1,
  6496. },
  /* ACCEPT: symmetric case — value pointer += masked unknown scalar. */
  6497. {
  6498. "map access: value_ptr += unknown scalar",
  6499. .insns = {
  6500. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6501. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6502. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6503. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6504. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6505. BPF_FUNC_map_lookup_elem),
  6506. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6507. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6508. BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
  6509. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  6510. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6511. BPF_MOV64_IMM(BPF_REG_0, 1),
  6512. BPF_EXIT_INSN(),
  6513. },
  6514. .fixup_map_array_48b = { 3 },
  6515. .result = ACCEPT,
  6516. .retval = 1,
  6517. },
  /* REJECT: adding two pointers (r0 += r0) is never allowed; the verifier
   * reports "R0 pointer += pointer prohibited".
   */
  6518. {
  6519. "map access: value_ptr += value_ptr",
  6520. .insns = {
  6521. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6522. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6523. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6524. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6525. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6526. BPF_FUNC_map_lookup_elem),
  6527. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  6528. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
  6529. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6530. BPF_MOV64_IMM(BPF_REG_0, 1),
  6531. BPF_EXIT_INSN(),
  6532. },
  6533. .fixup_map_array_48b = { 3 },
  6534. .result = REJECT,
  6535. .errstr = "R0 pointer += pointer prohibited",
  6536. },
  /* REJECT: subtracting a pointer FROM a scalar (r1 = 4; r1 -= r0) is
   * prohibited ("tried to subtract pointer from scalar").
   */
  6537. {
  6538. "map access: known scalar -= value_ptr",
  6539. .insns = {
  6540. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6541. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6542. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6543. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6544. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6545. BPF_FUNC_map_lookup_elem),
  6546. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  6547. BPF_MOV64_IMM(BPF_REG_1, 4),
  6548. BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
  6549. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
  6550. BPF_MOV64_IMM(BPF_REG_0, 1),
  6551. BPF_EXIT_INSN(),
  6552. },
  6553. .fixup_map_array_48b = { 3 },
  6554. .result = REJECT,
  6555. .errstr = "R1 tried to subtract pointer from scalar",
  6556. },
  /* REJECT: value_ptr - 4 would point before the start of the array value
   * ("R0 min value is outside of the array range").
   */
  6557. {
  6558. "map access: value_ptr -= known scalar",
  6559. .insns = {
  6560. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6561. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6562. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6563. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6564. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6565. BPF_FUNC_map_lookup_elem),
  6566. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  6567. BPF_MOV64_IMM(BPF_REG_1, 4),
  6568. BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
  6569. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6570. BPF_MOV64_IMM(BPF_REG_0, 1),
  6571. BPF_EXIT_INSN(),
  6572. },
  6573. .fixup_map_array_48b = { 3 },
  6574. .result = REJECT,
  6575. .errstr = "R0 min value is outside of the array range",
  6576. },
  /* ACCEPT: add 6 then subtract 4 — net offset +2 stays inside the value,
   * so subtracting a known scalar is fine when the result is in bounds.
   */
  6577. {
  6578. "map access: value_ptr -= known scalar, 2",
  6579. .insns = {
  6580. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6581. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6582. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6583. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6584. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6585. BPF_FUNC_map_lookup_elem),
  6586. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  6587. BPF_MOV64_IMM(BPF_REG_1, 6),
  6588. BPF_MOV64_IMM(BPF_REG_2, 4),
  6589. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  6590. BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
  6591. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6592. BPF_MOV64_IMM(BPF_REG_0, 1),
  6593. BPF_EXIT_INSN(),
  6594. },
  6595. .fixup_map_array_48b = { 3 },
  6596. .result = ACCEPT,
  6597. .retval = 1,
  6598. },
  /* REJECT: scalar -= pointer, with an unknown (masked) scalar on the
   * left-hand side — same prohibition as the known-scalar variant.
   */
  6599. {
  6600. "map access: unknown scalar -= value_ptr",
  6601. .insns = {
  6602. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6603. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6604. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6605. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6606. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6607. BPF_FUNC_map_lookup_elem),
  6608. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6609. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6610. BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
  6611. BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
  6612. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
  6613. BPF_MOV64_IMM(BPF_REG_0, 1),
  6614. BPF_EXIT_INSN(),
  6615. },
  6616. .fixup_map_array_48b = { 3 },
  6617. .result = REJECT,
  6618. .errstr = "R1 tried to subtract pointer from scalar",
  6619. },
  /* REJECT: subtracting an unknown [0, 15] scalar from the value pointer
   * can go below the start of the value ("R0 min value is negative").
   */
  6620. {
  6621. "map access: value_ptr -= unknown scalar",
  6622. .insns = {
  6623. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6624. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6625. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6626. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6627. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6628. BPF_FUNC_map_lookup_elem),
  6629. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6630. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6631. BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
  6632. BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
  6633. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6634. BPF_MOV64_IMM(BPF_REG_0, 1),
  6635. BPF_EXIT_INSN(),
  6636. },
  6637. .fixup_map_array_48b = { 3 },
  6638. .result = REJECT,
  6639. .errstr = "R0 min value is negative",
  6640. },
  /* ACCEPT: first add a scalar known to be at least 7 (AND 0xf then OR 0x7),
   * then subtract a scalar bounded [0, 7] — the pointer can never drop
   * below its original position.
   */
  6641. {
  6642. "map access: value_ptr -= unknown scalar, 2",
  6643. .insns = {
  6644. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6645. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6646. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6647. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6648. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6649. BPF_FUNC_map_lookup_elem),
  6650. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
  6651. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6652. BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
  6653. BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
  6654. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  6655. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6656. BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
  6657. BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
  6658. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6659. BPF_MOV64_IMM(BPF_REG_0, 1),
  6660. BPF_EXIT_INSN(),
  6661. },
  6662. .fixup_map_array_48b = { 3 },
  6663. .result = REJECT,
  6664. .retval = 1,
  6665. },
  /* REJECT: pointer - pointer yields a scalar; dereferencing the result is
   * an invalid mem access for privileged, and the subtraction itself is
   * prohibited for unprivileged.
   */
  6666. {
  6667. "map access: value_ptr -= value_ptr",
  6668. .insns = {
  6669. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  6670. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6671. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6672. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6673. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  6674. BPF_FUNC_map_lookup_elem),
  6675. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  6676. BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
  6677. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  6678. BPF_MOV64_IMM(BPF_REG_0, 1),
  6679. BPF_EXIT_INSN(),
  6680. },
  6681. .fixup_map_array_48b = { 3 },
  6682. .result = REJECT,
  6683. .errstr = "R0 invalid mem access 'inv'",
  6684. .errstr_unpriv = "R0 pointer -= pointer prohibited",
  6685. },
  /* ACCEPT: a looked-up map value pointer may itself be passed as the key
   * argument to a second map_lookup_elem (both maps have 16-byte values,
   * matching the second map's key size).
   */
  6686. {
  6687. "map lookup helper access to map",
  6688. .insns = {
  6689. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6690. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6691. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6692. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6693. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6694. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  6695. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6696. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6697. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6698. BPF_EXIT_INSN(),
  6699. },
  6700. .fixup_map_hash_16b = { 3, 8 },
  6701. .result = ACCEPT,
  6702. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6703. },
  /* ACCEPT: a map value pointer used as both key (r2) and value (r3) for
   * map_update_elem on a map of matching sizes.
   */
  6704. {
  6705. "map update helper access to map",
  6706. .insns = {
  6707. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6708. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6709. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6710. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6711. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6712. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6713. BPF_MOV64_IMM(BPF_REG_4, 0),
  6714. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  6715. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6716. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6717. BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
  6718. BPF_EXIT_INSN(),
  6719. },
  6720. .fixup_map_hash_16b = { 3, 10 },
  6721. .result = ACCEPT,
  6722. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6723. },
  /* REJECT: the first lookup is against an 8-byte-value map, but the update
   * targets a 16-byte map — the 8-byte value cannot back a 16-byte access.
   */
  6724. {
  6725. "map update helper access to map: wrong size",
  6726. .insns = {
  6727. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6728. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6729. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6730. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6731. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6732. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6733. BPF_MOV64_IMM(BPF_REG_4, 0),
  6734. BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
  6735. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6736. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6737. BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
  6738. BPF_EXIT_INSN(),
  6739. },
  6740. .fixup_map_hash_8b = { 3 },
  6741. .fixup_map_hash_16b = { 10 },
  6742. .result = REJECT,
  6743. .errstr = "invalid access to map value, value_size=8 off=0 size=16",
  6744. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6745. },
  /* ACCEPT: value pointer adjusted by a constant immediate offset
   * (offsetof(struct other_val, bar)) before being used as a lookup key —
   * the remaining bytes still cover the key size.
   */
  6746. {
  6747. "map helper access to adjusted map (via const imm)",
  6748. .insns = {
  6749. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6750. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6751. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6752. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6753. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6754. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  6755. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6756. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
  6757. offsetof(struct other_val, bar)),
  6758. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6759. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6760. BPF_EXIT_INSN(),
  6761. },
  6762. .fixup_map_hash_16b = { 3, 9 },
  6763. .result = ACCEPT,
  6764. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6765. },
  /* REJECT: offset sizeof(other_val) - 4 = 12 leaves only 4 bytes of the
   * 16-byte value, too small for the 8-byte key read.
   */
  6766. {
  6767. "map helper access to adjusted map (via const imm): out-of-bound 1",
  6768. .insns = {
  6769. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6770. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6771. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6772. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6773. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6774. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  6775. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6776. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
  6777. sizeof(struct other_val) - 4),
  6778. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6779. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6780. BPF_EXIT_INSN(),
  6781. },
  6782. .fixup_map_hash_16b = { 3, 9 },
  6783. .result = REJECT,
  6784. .errstr = "invalid access to map value, value_size=16 off=12 size=8",
  6785. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6786. },
  /* REJECT: a negative constant offset (-4) points before the value. */
  6787. {
  6788. "map helper access to adjusted map (via const imm): out-of-bound 2",
  6789. .insns = {
  6790. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6791. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6792. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6793. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6794. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6795. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  6796. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6797. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
  6798. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6799. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6800. BPF_EXIT_INSN(),
  6801. },
  6802. .fixup_map_hash_16b = { 3, 9 },
  6803. .result = REJECT,
  6804. .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
  6805. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6806. },
  /* ACCEPT: same in-bounds adjustment, but the constant arrives through a
   * register (r3) instead of an immediate.
   */
  6807. {
  6808. "map helper access to adjusted map (via const reg)",
  6809. .insns = {
  6810. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6811. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6812. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6813. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6814. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6815. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6816. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6817. BPF_MOV64_IMM(BPF_REG_3,
  6818. offsetof(struct other_val, bar)),
  6819. BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
  6820. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6821. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6822. BPF_EXIT_INSN(),
  6823. },
  6824. .fixup_map_hash_16b = { 3, 10 },
  6825. .result = ACCEPT,
  6826. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6827. },
  /* REJECT: register-borne constant offset 12 — same out-of-bound as the
   * immediate variant above.
   */
  6828. {
  6829. "map helper access to adjusted map (via const reg): out-of-bound 1",
  6830. .insns = {
  6831. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6832. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6833. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6834. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6835. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6836. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6837. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6838. BPF_MOV64_IMM(BPF_REG_3,
  6839. sizeof(struct other_val) - 4),
  6840. BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
  6841. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6842. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6843. BPF_EXIT_INSN(),
  6844. },
  6845. .fixup_map_hash_16b = { 3, 10 },
  6846. .result = REJECT,
  6847. .errstr = "invalid access to map value, value_size=16 off=12 size=8",
  6848. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6849. },
  /* REJECT: register-borne negative offset (-4) before the value. */
  6850. {
  6851. "map helper access to adjusted map (via const reg): out-of-bound 2",
  6852. .insns = {
  6853. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6854. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6855. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6856. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6857. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6858. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6859. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6860. BPF_MOV64_IMM(BPF_REG_3, -4),
  6861. BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
  6862. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6863. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6864. BPF_EXIT_INSN(),
  6865. },
  6866. .fixup_map_hash_16b = { 3, 10 },
  6867. .result = REJECT,
  6868. .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
  6869. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6870. },
  /* ACCEPT: a variable offset read from the map is bounded with JGT against
   * offsetof(struct other_val, bar) before being added, keeping the helper
   * key access in bounds.
   */
  6871. {
  6872. "map helper access to adjusted map (via variable)",
  6873. .insns = {
  6874. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6875. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6876. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6877. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6878. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6879. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  6880. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6881. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6882. BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
  6883. offsetof(struct other_val, bar), 4),
  6884. BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
  6885. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6886. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6887. BPF_EXIT_INSN(),
  6888. },
  6889. .fixup_map_hash_16b = { 3, 11 },
  6890. .result = ACCEPT,
  6891. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6892. },
  /* REJECT: the variable offset is added without any upper-bound check —
   * unbounded memory access.
   */
  6893. {
  6894. "map helper access to adjusted map (via variable): no max check",
  6895. .insns = {
  6896. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6897. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6898. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6899. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6900. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6901. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6902. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6903. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6904. BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
  6905. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6906. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6907. BPF_EXIT_INSN(),
  6908. },
  6909. .fixup_map_hash_16b = { 3, 10 },
  6910. .result = REJECT,
  6911. .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
  6912. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6913. },
  /* REJECT: the bound is off by one (offsetof + 1), so a max offset of 9
   * plus the 8-byte key access overruns the 16-byte value.
   */
  6914. {
  6915. "map helper access to adjusted map (via variable): wrong max check",
  6916. .insns = {
  6917. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6918. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6919. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6920. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6921. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6922. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  6923. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  6924. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
  6925. BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
  6926. offsetof(struct other_val, bar) + 1, 4),
  6927. BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
  6928. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6929. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6930. BPF_EXIT_INSN(),
  6931. },
  6932. .fixup_map_hash_16b = { 3, 11 },
  6933. .result = REJECT,
  6934. .errstr = "invalid access to map value, value_size=16 off=9 size=8",
  6935. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  6936. },
  /* ACCEPT (priv): spilling the map value pointer to the stack and loading
   * it back preserves its pointer type, so the later store through r3 is
   * valid. Unprivileged: REJECT, spilling the pointer leaks its address.
   */
  6937. {
  6938. "map element value is preserved across register spilling",
  6939. .insns = {
  6940. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6941. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6942. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6943. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6944. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6945. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  6946. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
  6947. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  6948. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
  6949. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
  6950. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
  6951. BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
  6952. BPF_EXIT_INSN(),
  6953. },
  6954. .fixup_map_hash_48b = { 3 },
  6955. .errstr_unpriv = "R0 leaks addr",
  6956. .result = ACCEPT,
  6957. .result_unpriv = REJECT,
  6958. },
  /* ACCEPT (priv): here the possibly-NULL lookup result is spilled BEFORE
   * the NULL check; the verifier must still mark the spilled slot as
   * map_value_or_null and honor the later check.
   */
  6959. {
  6960. "map element value or null is marked on register spilling",
  6961. .insns = {
  6962. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6963. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6964. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6965. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6966. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6967. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  6968. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
  6969. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
  6970. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  6971. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
  6972. BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
  6973. BPF_EXIT_INSN(),
  6974. },
  6975. .fixup_map_hash_48b = { 3 },
  6976. .errstr_unpriv = "R0 leaks addr",
  6977. .result = ACCEPT,
  6978. .result_unpriv = REJECT,
  6979. },
  /* REJECT: helper calls clobber caller-saved registers r1-r5, so storing
   * r1 after the call reads an undefined register ("R1 !read_ok").
   */
  6980. {
  6981. "map element value store of cleared call register",
  6982. .insns = {
  6983. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  6984. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  6985. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  6986. BPF_LD_MAP_FD(BPF_REG_1, 0),
  6987. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  6988. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  6989. BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
  6990. BPF_EXIT_INSN(),
  6991. },
  6992. .fixup_map_hash_48b = { 3 },
  6993. .errstr_unpriv = "R1 !read_ok",
  6994. .errstr = "R1 !read_ok",
  6995. .result = REJECT,
  6996. .result_unpriv = REJECT,
  6997. },
  /* ACCEPT (priv): misaligned stores at various positive and negative
   * offsets into the map value are permitted when the platform supports
   * efficient unaligned access (F_NEEDS_EFFICIENT_UNALIGNED_ACCESS).
   */
  6998. {
  6999. "map element value with unaligned store",
  7000. .insns = {
  7001. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7002. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7003. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7004. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7005. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7006. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
  7007. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
  7008. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
  7009. BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
  7010. BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
  7011. BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
  7012. BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
  7013. BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
  7014. BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
  7015. BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
  7016. BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
  7017. BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
  7018. BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
  7019. BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
  7020. BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
  7021. BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
  7022. BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
  7023. BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
  7024. BPF_EXIT_INSN(),
  7025. },
  7026. .fixup_map_hash_48b = { 3 },
  7027. .errstr_unpriv = "R0 leaks addr",
  7028. .result = ACCEPT,
  7029. .result_unpriv = REJECT,
  7030. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  7031. },
  /* ACCEPT (priv): the matching load-side test — misaligned 8-byte loads
   * from a bounds-checked map value, gated on the same unaligned-access
   * flag.
   */
  7032. {
  7033. "map element value with unaligned load",
  7034. .insns = {
  7035. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7036. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7037. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7038. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7039. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7040. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
  7041. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  7042. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
  7043. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
  7044. BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
  7045. BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
  7046. BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
  7047. BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
  7048. BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
  7049. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
  7050. BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
  7051. BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
  7052. BPF_EXIT_INSN(),
  7053. },
  7054. .fixup_map_hash_48b = { 3 },
  7055. .errstr_unpriv = "R0 leaks addr",
  7056. .result = ACCEPT,
  7057. .result_unpriv = REJECT,
  7058. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  7059. },
  /* REJECT: bitwise AND on a map value pointer. */
  7060. {
  7061. "map element value illegal alu op, 1",
  7062. .insns = {
  7063. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7064. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7065. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7066. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7067. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7068. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  7069. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
  7070. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
  7071. BPF_EXIT_INSN(),
  7072. },
  7073. .fixup_map_hash_48b = { 3 },
  7074. .errstr = "R0 bitwise operator &= on pointer",
  7075. .result = REJECT,
  7076. },
  /* REJECT: 32-bit ALU (BPF_ALU32) on a pointer truncates it — prohibited
   * even when the added immediate is 0.
   */
  7077. {
  7078. "map element value illegal alu op, 2",
  7079. .insns = {
  7080. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7081. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7082. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7083. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7084. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7085. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  7086. BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
  7087. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
  7088. BPF_EXIT_INSN(),
  7089. },
  7090. .fixup_map_hash_48b = { 3 },
  7091. .errstr = "R0 32-bit pointer arithmetic prohibited",
  7092. .result = REJECT,
  7093. },
  /* REJECT: division of a pointer. */
  7094. {
  7095. "map element value illegal alu op, 3",
  7096. .insns = {
  7097. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7098. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7099. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7100. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7101. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7102. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  7103. BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
  7104. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
  7105. BPF_EXIT_INSN(),
  7106. },
  7107. .fixup_map_hash_48b = { 3 },
  7108. .errstr = "R0 pointer arithmetic with /= operator",
  7109. .result = REJECT,
  7110. },
  /* REJECT: byte-swap (BPF_ENDIAN) on a pointer turns it into an unknown
   * scalar; the following store is then an invalid mem access. Unpriv is
   * stopped earlier at the pointer arithmetic itself.
   */
  7111. {
  7112. "map element value illegal alu op, 4",
  7113. .insns = {
  7114. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7115. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7116. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7117. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7118. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7119. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  7120. BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
  7121. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
  7122. BPF_EXIT_INSN(),
  7123. },
  7124. .fixup_map_hash_48b = { 3 },
  7125. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  7126. .errstr = "invalid mem access 'inv'",
  7127. .result = REJECT,
  7128. .result_unpriv = REJECT,
  7129. },
  /* REJECT: atomic add (BPF_STX_XADD) applied to a stack slot holding a
   * spilled pointer corrupts it to an unknown value — the reloaded r0 can
   * no longer be dereferenced.
   */
  7130. {
  7131. "map element value illegal alu op, 5",
  7132. .insns = {
  7133. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7134. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7135. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7136. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7137. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7138. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  7139. BPF_MOV64_IMM(BPF_REG_3, 4096),
  7140. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7141. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7142. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
  7143. BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
  7144. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
  7145. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
  7146. BPF_EXIT_INSN(),
  7147. },
  7148. .fixup_map_hash_48b = { 3 },
  7149. .errstr = "R0 invalid mem access 'inv'",
  7150. .result = REJECT,
  7151. },
  /* ACCEPT (priv): variant of the spilling test where the value pointer is
   * first advanced to offsetof(struct test_val, foo) — the (possibly
   * unaligned) adjusted pointer keeps its type across spill/fill.
   * Unprivileged: REJECT, spilling the pointer leaks its address.
   */
  7152. {
  7153. "map element value is preserved across register spilling",
  7154. .insns = {
  7155. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7156. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7157. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7158. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7159. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7160. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  7161. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
  7162. offsetof(struct test_val, foo)),
  7163. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
  7164. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7165. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
  7166. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
  7167. BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
  7168. BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
  7169. BPF_EXIT_INSN(),
  7170. },
  7171. .fixup_map_hash_48b = { 3 },
  7172. .errstr_unpriv = "R0 leaks addr",
  7173. .result = ACCEPT,
  7174. .result_unpriv = REJECT,
  7175. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  7176. },
  /* ACCEPT: variable-length probe_read into an initialized 64-byte stack
   * region. The length is masked (AND 64) and checked non-zero via a jump,
   * so max size 64 matches the initialized area.
   */
  7177. {
  7178. "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
  7179. .insns = {
  7180. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7181. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7182. BPF_MOV64_IMM(BPF_REG_0, 0),
  7183. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
  7184. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
  7185. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
  7186. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
  7187. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
  7188. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
  7189. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
  7190. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  7191. BPF_MOV64_IMM(BPF_REG_2, 16),
  7192. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  7193. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  7194. BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
  7195. BPF_MOV64_IMM(BPF_REG_4, 0),
  7196. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  7197. BPF_MOV64_IMM(BPF_REG_3, 0),
  7198. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7199. BPF_MOV64_IMM(BPF_REG_0, 0),
  7200. BPF_EXIT_INSN(),
  7201. },
  7202. .result = ACCEPT,
  7203. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7204. },
  /* REJECT: masking alone still allows size 0..64 over an UNINITIALIZED
   * stack region — the helper would read uninitialized stack.
   */
  7205. {
  7206. "helper access to variable memory: stack, bitwise AND, zero included",
  7207. .insns = {
  7208. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7209. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7210. BPF_MOV64_IMM(BPF_REG_2, 16),
  7211. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  7212. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  7213. BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
  7214. BPF_MOV64_IMM(BPF_REG_3, 0),
  7215. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7216. BPF_EXIT_INSN(),
  7217. },
  7218. .errstr = "invalid indirect read from stack off -64+0 size 64",
  7219. .result = REJECT,
  7220. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7221. },
  /* REJECT: mask of 65 permits a size one byte past the 64-byte stack
   * buffer.
   */
  7222. {
  7223. "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
  7224. .insns = {
  7225. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7226. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7227. BPF_MOV64_IMM(BPF_REG_2, 16),
  7228. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  7229. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  7230. BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
  7231. BPF_MOV64_IMM(BPF_REG_4, 0),
  7232. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  7233. BPF_MOV64_IMM(BPF_REG_3, 0),
  7234. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7235. BPF_MOV64_IMM(BPF_REG_0, 0),
  7236. BPF_EXIT_INSN(),
  7237. },
  7238. .errstr = "invalid stack type R1 off=-64 access_size=65",
  7239. .result = REJECT,
  7240. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7241. },
  /* ACCEPT: bounds established purely via unsigned jumps (JGT 64 upper,
   * JGE 0 lower) instead of masking, over an initialized region.
   */
  7242. {
  7243. "helper access to variable memory: stack, JMP, correct bounds",
  7244. .insns = {
  7245. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7246. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7247. BPF_MOV64_IMM(BPF_REG_0, 0),
  7248. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
  7249. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
  7250. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
  7251. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
  7252. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
  7253. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
  7254. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
  7255. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  7256. BPF_MOV64_IMM(BPF_REG_2, 16),
  7257. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  7258. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  7259. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
  7260. BPF_MOV64_IMM(BPF_REG_4, 0),
  7261. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  7262. BPF_MOV64_IMM(BPF_REG_3, 0),
  7263. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7264. BPF_MOV64_IMM(BPF_REG_0, 0),
  7265. BPF_EXIT_INSN(),
  7266. },
  7267. .result = ACCEPT,
  7268. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7269. },
  /* ACCEPT: same structure with signed comparisons (JSGT/JSGE), which also
   * pin the size to a safe range.
   */
  7270. {
  7271. "helper access to variable memory: stack, JMP (signed), correct bounds",
  7272. .insns = {
  7273. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7274. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7275. BPF_MOV64_IMM(BPF_REG_0, 0),
  7276. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
  7277. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
  7278. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
  7279. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
  7280. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
  7281. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
  7282. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
  7283. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  7284. BPF_MOV64_IMM(BPF_REG_2, 16),
  7285. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  7286. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  7287. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
  7288. BPF_MOV64_IMM(BPF_REG_4, 0),
  7289. BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
  7290. BPF_MOV64_IMM(BPF_REG_3, 0),
  7291. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7292. BPF_MOV64_IMM(BPF_REG_0, 0),
  7293. BPF_EXIT_INSN(),
  7294. },
  7295. .result = ACCEPT,
  7296. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7297. },
  7298. {
  7299. "helper access to variable memory: stack, JMP, bounds + offset",
  7300. .insns = {
  7301. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7302. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7303. BPF_MOV64_IMM(BPF_REG_2, 16),
  7304. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  7305. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  7306. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
  7307. BPF_MOV64_IMM(BPF_REG_4, 0),
  7308. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
  7309. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
  7310. BPF_MOV64_IMM(BPF_REG_3, 0),
  7311. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7312. BPF_MOV64_IMM(BPF_REG_0, 0),
  7313. BPF_EXIT_INSN(),
  7314. },
  7315. .errstr = "invalid stack type R1 off=-64 access_size=65",
  7316. .result = REJECT,
  7317. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7318. },
  7319. {
  7320. "helper access to variable memory: stack, JMP, wrong max",
  7321. .insns = {
  7322. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7323. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7324. BPF_MOV64_IMM(BPF_REG_2, 16),
  7325. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  7326. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  7327. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
  7328. BPF_MOV64_IMM(BPF_REG_4, 0),
  7329. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  7330. BPF_MOV64_IMM(BPF_REG_3, 0),
  7331. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7332. BPF_MOV64_IMM(BPF_REG_0, 0),
  7333. BPF_EXIT_INSN(),
  7334. },
  7335. .errstr = "invalid stack type R1 off=-64 access_size=65",
  7336. .result = REJECT,
  7337. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7338. },
  7339. {
  7340. "helper access to variable memory: stack, JMP, no max check",
  7341. .insns = {
  7342. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7343. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7344. BPF_MOV64_IMM(BPF_REG_2, 16),
  7345. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  7346. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  7347. BPF_MOV64_IMM(BPF_REG_4, 0),
  7348. BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
  7349. BPF_MOV64_IMM(BPF_REG_3, 0),
  7350. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7351. BPF_MOV64_IMM(BPF_REG_0, 0),
  7352. BPF_EXIT_INSN(),
  7353. },
  7354. /* because max wasn't checked, signed min is negative */
  7355. .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
  7356. .result = REJECT,
  7357. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7358. },
  7359. {
  7360. "helper access to variable memory: stack, JMP, no min check",
  7361. .insns = {
  7362. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7363. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7364. BPF_MOV64_IMM(BPF_REG_2, 16),
  7365. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  7366. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  7367. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
  7368. BPF_MOV64_IMM(BPF_REG_3, 0),
  7369. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7370. BPF_MOV64_IMM(BPF_REG_0, 0),
  7371. BPF_EXIT_INSN(),
  7372. },
  7373. .errstr = "invalid indirect read from stack off -64+0 size 64",
  7374. .result = REJECT,
  7375. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7376. },
  7377. {
  7378. "helper access to variable memory: stack, JMP (signed), no min check",
  7379. .insns = {
  7380. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7381. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7382. BPF_MOV64_IMM(BPF_REG_2, 16),
  7383. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
  7384. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
  7385. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
  7386. BPF_MOV64_IMM(BPF_REG_3, 0),
  7387. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7388. BPF_MOV64_IMM(BPF_REG_0, 0),
  7389. BPF_EXIT_INSN(),
  7390. },
  7391. .errstr = "R2 min value is negative",
  7392. .result = REJECT,
  7393. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7394. },
  /* Variable-size helper access into a looked-up map value (48-byte
   * test_val from fixup_map_hash_48b): the size in R2 must be bounded
   * against the map's value_size, accounting for any pointer offset.
   */
  /* ACCEPT: size bounded to [0, sizeof(struct test_val)] == [0, 48]. */
  7395. {
  7396. "helper access to variable memory: map, JMP, correct bounds",
  7397. .insns = {
  7398. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7399. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7400. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7401. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7402. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7403. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
  7404. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  7405. BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
  7406. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  7407. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  7408. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
  7409. sizeof(struct test_val), 4),
  7410. BPF_MOV64_IMM(BPF_REG_4, 0),
  7411. BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
  7412. BPF_MOV64_IMM(BPF_REG_3, 0),
  7413. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7414. BPF_MOV64_IMM(BPF_REG_0, 0),
  7415. BPF_EXIT_INSN(),
  7416. },
  7417. .fixup_map_hash_48b = { 3 },
  7418. .result = ACCEPT,
  7419. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7420. },
  /* REJECT: bound is sizeof(test_val) + 1, so size 49 > value_size 48. */
  7421. {
  7422. "helper access to variable memory: map, JMP, wrong max",
  7423. .insns = {
  7424. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7425. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7426. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7427. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7428. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7429. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
  7430. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  7431. BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
  7432. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  7433. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  7434. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
  7435. sizeof(struct test_val) + 1, 4),
  7436. BPF_MOV64_IMM(BPF_REG_4, 0),
  7437. BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
  7438. BPF_MOV64_IMM(BPF_REG_3, 0),
  7439. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7440. BPF_MOV64_IMM(BPF_REG_0, 0),
  7441. BPF_EXIT_INSN(),
  7442. },
  7443. .fixup_map_hash_48b = { 3 },
  7444. .errstr = "invalid access to map value, value_size=48 off=0 size=49",
  7445. .result = REJECT,
  7446. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7447. },
  /* ACCEPT: pointer advanced by 20, size bounded to 48 - 20 = 28. */
  7448. {
  7449. "helper access to variable memory: map adjusted, JMP, correct bounds",
  7450. .insns = {
  7451. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7452. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7453. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7454. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7455. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7456. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
  7457. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  7458. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
  7459. BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
  7460. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  7461. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  7462. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
  7463. sizeof(struct test_val) - 20, 4),
  7464. BPF_MOV64_IMM(BPF_REG_4, 0),
  7465. BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
  7466. BPF_MOV64_IMM(BPF_REG_3, 0),
  7467. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7468. BPF_MOV64_IMM(BPF_REG_0, 0),
  7469. BPF_EXIT_INSN(),
  7470. },
  7471. .fixup_map_hash_48b = { 3 },
  7472. .result = ACCEPT,
  7473. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7474. },
  /* REJECT: bound 48 - 19 = 29 overruns the remaining 28 bytes. */
  7475. {
  7476. "helper access to variable memory: map adjusted, JMP, wrong max",
  7477. .insns = {
  7478. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7479. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7480. BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
  7481. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7482. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7483. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
  7484. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  7485. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
  7486. BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
  7487. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  7488. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  7489. BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
  7490. sizeof(struct test_val) - 19, 4),
  7491. BPF_MOV64_IMM(BPF_REG_4, 0),
  7492. BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
  7493. BPF_MOV64_IMM(BPF_REG_3, 0),
  7494. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7495. BPF_MOV64_IMM(BPF_REG_0, 0),
  7496. BPF_EXIT_INSN(),
  7497. },
  7498. .fixup_map_hash_48b = { 3 },
  7499. .errstr = "R1 min value is outside of the array range",
  7500. .result = REJECT,
  7501. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7502. },
  /* Size-zero semantics for variable-memory helper arguments.
   * bpf_csum_diff() takes ARG_PTR_TO_MEM_OR_NULL: size 0 is allowed
   * even with a NULL pointer, but any size > 0 requires a valid
   * pointer.  bpf_probe_read() takes a non-NULL-able pointer: NULL is
   * always rejected, while size 0 with a valid pointer is fine.
   */
  /* ACCEPT: NULL pointer with size 0 is legal for csum_diff. */
  7503. {
  7504. "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
  7505. .insns = {
  7506. BPF_MOV64_IMM(BPF_REG_1, 0),
  7507. BPF_MOV64_IMM(BPF_REG_2, 0),
  7508. BPF_MOV64_IMM(BPF_REG_3, 0),
  7509. BPF_MOV64_IMM(BPF_REG_4, 0),
  7510. BPF_MOV64_IMM(BPF_REG_5, 0),
  7511. BPF_EMIT_CALL(BPF_FUNC_csum_diff),
  7512. BPF_EXIT_INSN(),
  7513. },
  7514. .result = ACCEPT,
  7515. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  7516. },
  /* REJECT: size may be up to 64 (AND 64) while the pointer is NULL. */
  7517. {
  7518. "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
  7519. .insns = {
  7520. BPF_MOV64_IMM(BPF_REG_1, 0),
  7521. BPF_MOV64_IMM(BPF_REG_2, 1),
  7522. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  7523. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  7524. BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
  7525. BPF_MOV64_IMM(BPF_REG_3, 0),
  7526. BPF_MOV64_IMM(BPF_REG_4, 0),
  7527. BPF_MOV64_IMM(BPF_REG_5, 0),
  7528. BPF_EMIT_CALL(BPF_FUNC_csum_diff),
  7529. BPF_EXIT_INSN(),
  7530. },
  7531. .errstr = "R1 type=inv expected=fp",
  7532. .result = REJECT,
  7533. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  7534. },
  /* ACCEPT: non-NULL stack pointer with size masked to 0 or 8, stack
   * slot initialized first.
   */
  7535. {
  7536. "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
  7537. .insns = {
  7538. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7539. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  7540. BPF_MOV64_IMM(BPF_REG_2, 0),
  7541. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
  7542. BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
  7543. BPF_MOV64_IMM(BPF_REG_3, 0),
  7544. BPF_MOV64_IMM(BPF_REG_4, 0),
  7545. BPF_MOV64_IMM(BPF_REG_5, 0),
  7546. BPF_EMIT_CALL(BPF_FUNC_csum_diff),
  7547. BPF_EXIT_INSN(),
  7548. },
  7549. .result = ACCEPT,
  7550. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  7551. },
  /* ACCEPT: non-NULL map-value pointer with constant size 0. */
  7552. {
  7553. "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
  7554. .insns = {
  7555. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  7556. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7557. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7558. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7559. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  7560. BPF_FUNC_map_lookup_elem),
  7561. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  7562. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  7563. BPF_MOV64_IMM(BPF_REG_2, 0),
  7564. BPF_MOV64_IMM(BPF_REG_3, 0),
  7565. BPF_MOV64_IMM(BPF_REG_4, 0),
  7566. BPF_MOV64_IMM(BPF_REG_5, 0),
  7567. BPF_EMIT_CALL(BPF_FUNC_csum_diff),
  7568. BPF_EXIT_INSN(),
  7569. },
  7570. .fixup_map_hash_8b = { 3 },
  7571. .result = ACCEPT,
  7572. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  7573. },
  /* ACCEPT: size read from map value and bounded to [0, 8]; pointer is
   * a valid (non-NULL) initialized stack slot.
   */
  7574. {
  7575. "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
  7576. .insns = {
  7577. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  7578. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7579. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7580. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7581. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  7582. BPF_FUNC_map_lookup_elem),
  7583. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
  7584. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
  7585. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
  7586. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7587. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  7588. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
  7589. BPF_MOV64_IMM(BPF_REG_3, 0),
  7590. BPF_MOV64_IMM(BPF_REG_4, 0),
  7591. BPF_MOV64_IMM(BPF_REG_5, 0),
  7592. BPF_EMIT_CALL(BPF_FUNC_csum_diff),
  7593. BPF_EXIT_INSN(),
  7594. },
  7595. .fixup_map_hash_8b = { 3 },
  7596. .result = ACCEPT,
  7597. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  7598. },
  /* ACCEPT: same with the map-value pointer itself as the buffer. */
  7599. {
  7600. "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
  7601. .insns = {
  7602. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  7603. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7604. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7605. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7606. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  7607. BPF_FUNC_map_lookup_elem),
  7608. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  7609. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  7610. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
  7611. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
  7612. BPF_MOV64_IMM(BPF_REG_3, 0),
  7613. BPF_MOV64_IMM(BPF_REG_4, 0),
  7614. BPF_MOV64_IMM(BPF_REG_5, 0),
  7615. BPF_EMIT_CALL(BPF_FUNC_csum_diff),
  7616. BPF_EXIT_INSN(),
  7617. },
  7618. .fixup_map_hash_8b = { 3 },
  7619. .result = ACCEPT,
  7620. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  7621. },
  /* ACCEPT: packet pointer validated against data_end, size bounded to
   * [0, 8] from packet data.
   */
  7622. {
  7623. "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
  7624. .insns = {
  7625. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  7626. offsetof(struct __sk_buff, data)),
  7627. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  7628. offsetof(struct __sk_buff, data_end)),
  7629. BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
  7630. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  7631. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
  7632. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  7633. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
  7634. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
  7635. BPF_MOV64_IMM(BPF_REG_3, 0),
  7636. BPF_MOV64_IMM(BPF_REG_4, 0),
  7637. BPF_MOV64_IMM(BPF_REG_5, 0),
  7638. BPF_EMIT_CALL(BPF_FUNC_csum_diff),
  7639. BPF_EXIT_INSN(),
  7640. },
  7641. .result = ACCEPT,
  7642. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  7643. .retval = 0 /* csum_diff of 64-byte packet */,
  7644. },
  /* REJECT: probe_read's pointer arg is not NULL-able, even at size 0. */
  7645. {
  7646. "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
  7647. .insns = {
  7648. BPF_MOV64_IMM(BPF_REG_1, 0),
  7649. BPF_MOV64_IMM(BPF_REG_2, 0),
  7650. BPF_MOV64_IMM(BPF_REG_3, 0),
  7651. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7652. BPF_EXIT_INSN(),
  7653. },
  7654. .errstr = "R1 type=inv expected=fp",
  7655. .result = REJECT,
  7656. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7657. },
  /* REJECT: NULL pointer with size 1 for probe_read. */
  7658. {
  7659. "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
  7660. .insns = {
  7661. BPF_MOV64_IMM(BPF_REG_1, 0),
  7662. BPF_MOV64_IMM(BPF_REG_2, 1),
  7663. BPF_MOV64_IMM(BPF_REG_3, 0),
  7664. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7665. BPF_EXIT_INSN(),
  7666. },
  7667. .errstr = "R1 type=inv expected=fp",
  7668. .result = REJECT,
  7669. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7670. },
  /* ACCEPT: valid stack pointer with constant size 0. */
  7671. {
  7672. "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
  7673. .insns = {
  7674. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7675. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  7676. BPF_MOV64_IMM(BPF_REG_2, 0),
  7677. BPF_MOV64_IMM(BPF_REG_3, 0),
  7678. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7679. BPF_EXIT_INSN(),
  7680. },
  7681. .result = ACCEPT,
  7682. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7683. },
  /* ACCEPT: null-checked map-value pointer with constant size 0. */
  7684. {
  7685. "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
  7686. .insns = {
  7687. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  7688. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7689. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7690. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7691. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7692. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  7693. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  7694. BPF_MOV64_IMM(BPF_REG_2, 0),
  7695. BPF_MOV64_IMM(BPF_REG_3, 0),
  7696. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7697. BPF_EXIT_INSN(),
  7698. },
  7699. .fixup_map_hash_8b = { 3 },
  7700. .result = ACCEPT,
  7701. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7702. },
  /* ACCEPT: size from map value bounded to [0, 8], stack dst valid. */
  7703. {
  7704. "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
  7705. .insns = {
  7706. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  7707. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7708. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7709. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7710. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7711. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  7712. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
  7713. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
  7714. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7715. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  7716. BPF_MOV64_IMM(BPF_REG_3, 0),
  7717. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7718. BPF_EXIT_INSN(),
  7719. },
  7720. .fixup_map_hash_8b = { 3 },
  7721. .result = ACCEPT,
  7722. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7723. },
  /* ACCEPT: same with the map-value pointer as destination. */
  7724. {
  7725. "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
  7726. .insns = {
  7727. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  7728. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7729. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7730. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7731. BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
  7732. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  7733. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  7734. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
  7735. BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
  7736. BPF_MOV64_IMM(BPF_REG_3, 0),
  7737. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7738. BPF_EXIT_INSN(),
  7739. },
  7740. .fixup_map_hash_8b = { 3 },
  7741. .result = ACCEPT,
  7742. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7743. },
  /* Stack-leak detection for variable-size helper access. */
  /* REJECT: fp-32..fp-25 is never written (the -32 store is missing),
   * so a variable size of up to 64 reads uninitialized stack — the
   * errstr pinpoints the hole at off -64+32.
   */
  7744. {
  7745. "helper access to variable memory: 8 bytes leak",
  7746. .insns = {
  7747. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7748. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7749. BPF_MOV64_IMM(BPF_REG_0, 0),
  7750. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
  7751. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
  7752. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
  7753. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
  7754. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
  7755. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
  7756. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  7757. BPF_MOV64_IMM(BPF_REG_2, 1),
  7758. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
  7759. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
  7760. BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
  7761. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
  7762. BPF_MOV64_IMM(BPF_REG_3, 0),
  7763. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7764. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
  7765. BPF_EXIT_INSN(),
  7766. },
  7767. .errstr = "invalid indirect read from stack off -64+32 size 64",
  7768. .result = REJECT,
  7769. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7770. },
  /* ACCEPT: all eight 8-byte slots of fp-64..fp-1 are zeroed before
   * the helper call, so no uninitialized stack can be exposed.
   */
  7771. {
  7772. "helper access to variable memory: 8 bytes no leak (init memory)",
  7773. .insns = {
  7774. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  7775. BPF_MOV64_IMM(BPF_REG_0, 0),
  7776. BPF_MOV64_IMM(BPF_REG_0, 0),
  7777. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
  7778. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
  7779. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
  7780. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
  7781. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
  7782. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
  7783. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
  7784. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
  7785. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
  7786. BPF_MOV64_IMM(BPF_REG_2, 0),
  7787. BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
  7788. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
  7789. BPF_MOV64_IMM(BPF_REG_3, 0),
  7790. BPF_EMIT_CALL(BPF_FUNC_probe_read),
  7791. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
  7792. BPF_EXIT_INSN(),
  7793. },
  7794. .result = ACCEPT,
  7795. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  7796. },
  7797. {
  7798. "invalid and of negative number",
  7799. .insns = {
  7800. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  7801. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7802. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7803. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7804. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  7805. BPF_FUNC_map_lookup_elem),
  7806. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
  7807. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
  7808. BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
  7809. BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
  7810. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  7811. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
  7812. offsetof(struct test_val, foo)),
  7813. BPF_EXIT_INSN(),
  7814. },
  7815. .fixup_map_hash_48b = { 3 },
  7816. .errstr = "R0 max value is outside of the array range",
  7817. .result = REJECT,
  7818. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  7819. },
  7820. {
  7821. "invalid range check",
  7822. .insns = {
  7823. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  7824. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7825. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  7826. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7827. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  7828. BPF_FUNC_map_lookup_elem),
  7829. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
  7830. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
  7831. BPF_MOV64_IMM(BPF_REG_9, 1),
  7832. BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
  7833. BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
  7834. BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
  7835. BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
  7836. BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
  7837. BPF_MOV32_IMM(BPF_REG_3, 1),
  7838. BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
  7839. BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
  7840. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
  7841. BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
  7842. BPF_MOV64_REG(BPF_REG_0, 0),
  7843. BPF_EXIT_INSN(),
  7844. },
  7845. .fixup_map_hash_48b = { 3 },
  7846. .errstr = "R0 max value is outside of the array range",
  7847. .result = REJECT,
  7848. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  7849. },
  7850. {
  7851. "map in map access",
  7852. .insns = {
  7853. BPF_ST_MEM(0, BPF_REG_10, -4, 0),
  7854. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7855. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
  7856. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7857. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  7858. BPF_FUNC_map_lookup_elem),
  7859. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  7860. BPF_ST_MEM(0, BPF_REG_10, -4, 0),
  7861. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7862. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
  7863. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  7864. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  7865. BPF_FUNC_map_lookup_elem),
  7866. BPF_MOV64_IMM(BPF_REG_0, 0),
  7867. BPF_EXIT_INSN(),
  7868. },
  7869. .fixup_map_in_map = { 3 },
  7870. .result = ACCEPT,
  7871. },
  7872. {
  7873. "invalid inner map pointer",
  7874. .insns = {
  7875. BPF_ST_MEM(0, BPF_REG_10, -4, 0),
  7876. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7877. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
  7878. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7879. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  7880. BPF_FUNC_map_lookup_elem),
  7881. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
  7882. BPF_ST_MEM(0, BPF_REG_10, -4, 0),
  7883. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7884. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
  7885. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  7886. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  7887. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  7888. BPF_FUNC_map_lookup_elem),
  7889. BPF_MOV64_IMM(BPF_REG_0, 0),
  7890. BPF_EXIT_INSN(),
  7891. },
  7892. .fixup_map_in_map = { 3 },
  7893. .errstr = "R1 pointer arithmetic on map_ptr prohibited",
  7894. .result = REJECT,
  7895. },
  7896. {
  7897. "forgot null checking on the inner map pointer",
  7898. .insns = {
  7899. BPF_ST_MEM(0, BPF_REG_10, -4, 0),
  7900. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7901. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
  7902. BPF_LD_MAP_FD(BPF_REG_1, 0),
  7903. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  7904. BPF_FUNC_map_lookup_elem),
  7905. BPF_ST_MEM(0, BPF_REG_10, -4, 0),
  7906. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  7907. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
  7908. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  7909. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  7910. BPF_FUNC_map_lookup_elem),
  7911. BPF_MOV64_IMM(BPF_REG_0, 0),
  7912. BPF_EXIT_INSN(),
  7913. },
  7914. .fixup_map_in_map = { 3 },
  7915. .errstr = "R1 type=map_value_or_null expected=map_ptr",
  7916. .result = REJECT,
  7917. },
  /* BPF_LD_ABS / BPF_LD_IND calling convention: these instructions
   * clobber caller-saved registers R1-R5, so reading any of them
   * afterwards must be rejected with "!read_ok"; R7 survives and the
   * program is accepted.
   */
  /* REJECT: R1 read after LD_ABS clobbers it. */
  7918. {
  7919. "ld_abs: check calling conv, r1",
  7920. .insns = {
  7921. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  7922. BPF_MOV64_IMM(BPF_REG_1, 0),
  7923. BPF_LD_ABS(BPF_W, -0x200000),
  7924. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  7925. BPF_EXIT_INSN(),
  7926. },
  7927. .errstr = "R1 !read_ok",
  7928. .result = REJECT,
  7929. },
  /* REJECT: R2 read after LD_ABS clobbers it. */
  7930. {
  7931. "ld_abs: check calling conv, r2",
  7932. .insns = {
  7933. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  7934. BPF_MOV64_IMM(BPF_REG_2, 0),
  7935. BPF_LD_ABS(BPF_W, -0x200000),
  7936. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  7937. BPF_EXIT_INSN(),
  7938. },
  7939. .errstr = "R2 !read_ok",
  7940. .result = REJECT,
  7941. },
  /* REJECT: R3 read after LD_ABS clobbers it. */
  7942. {
  7943. "ld_abs: check calling conv, r3",
  7944. .insns = {
  7945. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  7946. BPF_MOV64_IMM(BPF_REG_3, 0),
  7947. BPF_LD_ABS(BPF_W, -0x200000),
  7948. BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
  7949. BPF_EXIT_INSN(),
  7950. },
  7951. .errstr = "R3 !read_ok",
  7952. .result = REJECT,
  7953. },
  /* REJECT: R4 read after LD_ABS clobbers it. */
  7954. {
  7955. "ld_abs: check calling conv, r4",
  7956. .insns = {
  7957. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  7958. BPF_MOV64_IMM(BPF_REG_4, 0),
  7959. BPF_LD_ABS(BPF_W, -0x200000),
  7960. BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
  7961. BPF_EXIT_INSN(),
  7962. },
  7963. .errstr = "R4 !read_ok",
  7964. .result = REJECT,
  7965. },
  /* REJECT: R5 read after LD_ABS clobbers it. */
  7966. {
  7967. "ld_abs: check calling conv, r5",
  7968. .insns = {
  7969. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  7970. BPF_MOV64_IMM(BPF_REG_5, 0),
  7971. BPF_LD_ABS(BPF_W, -0x200000),
  7972. BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
  7973. BPF_EXIT_INSN(),
  7974. },
  7975. .errstr = "R5 !read_ok",
  7976. .result = REJECT,
  7977. },
  /* ACCEPT: R7 is callee-saved across LD_ABS. */
  7978. {
  7979. "ld_abs: check calling conv, r7",
  7980. .insns = {
  7981. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  7982. BPF_MOV64_IMM(BPF_REG_7, 0),
  7983. BPF_LD_ABS(BPF_W, -0x200000),
  7984. BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
  7985. BPF_EXIT_INSN(),
  7986. },
  7987. .result = ACCEPT,
  7988. },
  /* ACCEPT: skb context is saved to R7, skb_vlan_push is called, then
   * R6 is reloaded from R7 before further LD_ABS accesses.
   */
  7989. {
  7990. "ld_abs: tests on r6 and skb data reload helper",
  7991. .insns = {
  7992. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  7993. BPF_LD_ABS(BPF_B, 0),
  7994. BPF_LD_ABS(BPF_H, 0),
  7995. BPF_LD_ABS(BPF_W, 0),
  7996. BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
  7997. BPF_MOV64_IMM(BPF_REG_6, 0),
  7998. BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
  7999. BPF_MOV64_IMM(BPF_REG_2, 1),
  8000. BPF_MOV64_IMM(BPF_REG_3, 2),
  8001. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  8002. BPF_FUNC_skb_vlan_push),
  8003. BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
  8004. BPF_LD_ABS(BPF_B, 0),
  8005. BPF_LD_ABS(BPF_H, 0),
  8006. BPF_LD_ABS(BPF_W, 0),
  8007. BPF_MOV64_IMM(BPF_REG_0, 42),
  8008. BPF_EXIT_INSN(),
  8009. },
  8010. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  8011. .result = ACCEPT,
  8012. .retval = 42 /* ultimate return value */,
  8013. },
  /* Same calling-convention checks for LD_IND (indexed variant). */
  /* REJECT: R1 read after LD_IND clobbers it. */
  8014. {
  8015. "ld_ind: check calling conv, r1",
  8016. .insns = {
  8017. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  8018. BPF_MOV64_IMM(BPF_REG_1, 1),
  8019. BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
  8020. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  8021. BPF_EXIT_INSN(),
  8022. },
  8023. .errstr = "R1 !read_ok",
  8024. .result = REJECT,
  8025. },
  /* REJECT: R2 read after LD_IND clobbers it. */
  8026. {
  8027. "ld_ind: check calling conv, r2",
  8028. .insns = {
  8029. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  8030. BPF_MOV64_IMM(BPF_REG_2, 1),
  8031. BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
  8032. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  8033. BPF_EXIT_INSN(),
  8034. },
  8035. .errstr = "R2 !read_ok",
  8036. .result = REJECT,
  8037. },
  /* REJECT: R3 read after LD_IND clobbers it. */
  8038. {
  8039. "ld_ind: check calling conv, r3",
  8040. .insns = {
  8041. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  8042. BPF_MOV64_IMM(BPF_REG_3, 1),
  8043. BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
  8044. BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
  8045. BPF_EXIT_INSN(),
  8046. },
  8047. .errstr = "R3 !read_ok",
  8048. .result = REJECT,
  8049. },
  /* REJECT: R4 read after LD_IND clobbers it. */
  8050. {
  8051. "ld_ind: check calling conv, r4",
  8052. .insns = {
  8053. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  8054. BPF_MOV64_IMM(BPF_REG_4, 1),
  8055. BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
  8056. BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
  8057. BPF_EXIT_INSN(),
  8058. },
  8059. .errstr = "R4 !read_ok",
  8060. .result = REJECT,
  8061. },
  /* REJECT: R5 read after LD_IND clobbers it. */
  8062. {
  8063. "ld_ind: check calling conv, r5",
  8064. .insns = {
  8065. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  8066. BPF_MOV64_IMM(BPF_REG_5, 1),
  8067. BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
  8068. BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
  8069. BPF_EXIT_INSN(),
  8070. },
  8071. .errstr = "R5 !read_ok",
  8072. .result = REJECT,
  8073. },
  /* ACCEPT: R7 is preserved across LD_IND; retval is its value 1. */
  8074. {
  8075. "ld_ind: check calling conv, r7",
  8076. .insns = {
  8077. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  8078. BPF_MOV64_IMM(BPF_REG_7, 1),
  8079. BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
  8080. BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
  8081. BPF_EXIT_INSN(),
  8082. },
  8083. .result = ACCEPT,
  8084. .retval = 1,
  8085. },
  /* Narrow loads from bpf_perf_event_data->sample_period: byte, half,
   * word and dword widths are all permitted for PERF_EVENT programs.
   * The big-endian branches adjust the offset so the same bytes are
   * read regardless of host byte order.
   */
  /* ACCEPT: 1-byte load of sample_period. */
  8086. {
  8087. "check bpf_perf_event_data->sample_period byte load permitted",
  8088. .insns = {
  8089. BPF_MOV64_IMM(BPF_REG_0, 0),
  8090. #if __BYTE_ORDER == __LITTLE_ENDIAN
  8091. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  8092. offsetof(struct bpf_perf_event_data, sample_period)),
  8093. #else
  8094. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
  8095. offsetof(struct bpf_perf_event_data, sample_period) + 7),
  8096. #endif
  8097. BPF_EXIT_INSN(),
  8098. },
  8099. .result = ACCEPT,
  8100. .prog_type = BPF_PROG_TYPE_PERF_EVENT,
  8101. },
  /* ACCEPT: 2-byte load of sample_period. */
  8102. {
  8103. "check bpf_perf_event_data->sample_period half load permitted",
  8104. .insns = {
  8105. BPF_MOV64_IMM(BPF_REG_0, 0),
  8106. #if __BYTE_ORDER == __LITTLE_ENDIAN
  8107. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  8108. offsetof(struct bpf_perf_event_data, sample_period)),
  8109. #else
  8110. BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
  8111. offsetof(struct bpf_perf_event_data, sample_period) + 6),
  8112. #endif
  8113. BPF_EXIT_INSN(),
  8114. },
  8115. .result = ACCEPT,
  8116. .prog_type = BPF_PROG_TYPE_PERF_EVENT,
  8117. },
  /* ACCEPT: 4-byte load of sample_period. */
  8118. {
  8119. "check bpf_perf_event_data->sample_period word load permitted",
  8120. .insns = {
  8121. BPF_MOV64_IMM(BPF_REG_0, 0),
  8122. #if __BYTE_ORDER == __LITTLE_ENDIAN
  8123. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  8124. offsetof(struct bpf_perf_event_data, sample_period)),
  8125. #else
  8126. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  8127. offsetof(struct bpf_perf_event_data, sample_period) + 4),
  8128. #endif
  8129. BPF_EXIT_INSN(),
  8130. },
  8131. .result = ACCEPT,
  8132. .prog_type = BPF_PROG_TYPE_PERF_EVENT,
  8133. },
  /* ACCEPT: full 8-byte load; no endian adjustment needed. */
  8134. {
  8135. "check bpf_perf_event_data->sample_period dword load permitted",
  8136. .insns = {
  8137. BPF_MOV64_IMM(BPF_REG_0, 0),
  8138. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
  8139. offsetof(struct bpf_perf_event_data, sample_period)),
  8140. BPF_EXIT_INSN(),
  8141. },
  8142. .result = ACCEPT,
  8143. .prog_type = BPF_PROG_TYPE_PERF_EVENT,
  8144. },
	{
		/* Unlike sample_period above, __sk_buff.data does not allow
		 * narrow (2-byte) context loads — must be rejected on
		 * either endianness.
		 */
		"check skb->data half load not permitted",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
#else
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, data) + 2),
#endif
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid bpf_context access",
	},
	{
		/* tc_classid is not accessible from LWT programs at all,
		 * so a half load must be rejected for that prog type.
		 */
		"check skb->tc_classid half load not permitted for lwt prog",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, tc_classid)),
#else
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, tc_classid) + 2),
#endif
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid bpf_context access",
		.prog_type = BPF_PROG_TYPE_LWT_IN,
	},
	{
		/* r1 = -8 from stack; the unsigned JGE against 2 and the
		 * signed JSGT against 4 together still leave r1's signed
		 * minimum unbounded, so using it as a map-value offset
		 * must be rejected.
		 */
		"bounds checks mixing signed and unsigned, positive bounds",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, 2),
		BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "unbounded min value",
		.result = REJECT,
	},
	{
		/* Base case: unsigned JGT against -1 (== UINT64_MAX) is
		 * never a lower bound, and the signed JSGT alone does not
		 * bound the minimum — reject.
		 */
		"bounds checks mixing signed and unsigned",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, -1),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "unbounded min value",
		.result = REJECT,
	},
	{
		/* Like the base case, but the unbounded value is first
		 * copied into r8 via ADD onto a zeroed register before the
		 * signed check — the taint must follow the copy.
		 */
		"bounds checks mixing signed and unsigned, variant 2",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, -1),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
		BPF_MOV64_IMM(BPF_REG_8, 0),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
		BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "unbounded min value",
		.result = REJECT,
	},
	{
		/* As variant 2 but the copy is a plain MOV into r8. */
		"bounds checks mixing signed and unsigned, variant 3",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, -1),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
		BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
		BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "unbounded min value",
		.result = REJECT,
	},
	{
		/* AND with 1 forces r1 into [0, 1], giving a real bound —
		 * this mix is safe and accepted.
		 */
		"bounds checks mixing signed and unsigned, variant 4",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, 1),
		BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.result = ACCEPT,
	},
	{
		/* The unbounded value is subtracted from the map pointer
		 * instead of added — still rejected.
		 */
		"bounds checks mixing signed and unsigned, variant 5",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, -1),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
		BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "unbounded min value",
		.result = REJECT,
	},
	{
		/* The tainted value is fed as the length (r4) of
		 * skb_load_bytes; its signed minimum stays negative, so
		 * the helper call must be rejected.
		 */
		"bounds checks mixing signed and unsigned, variant 6",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_6, -1),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_skb_load_bytes),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R4 min value is negative, either use unsigned",
		.result = REJECT,
	},
	{
		/* Unsigned JGT against 1G really does upper-bound the
		 * value, and the signed JSGT supplies a positive lower
		 * path — accepted.
		 */
		"bounds checks mixing signed and unsigned, variant 7",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.result = ACCEPT,
	},
	{
		/* The unsigned compare is inverted (r2 > r1 with r2 = -1)
		 * so the fall-through path is still unbounded below —
		 * reject.
		 */
		"bounds checks mixing signed and unsigned, variant 8",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, -1),
		BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "unbounded min value",
		.result = REJECT,
	},
	{
		/* Compare against INT64_MIN loaded as a 64-bit immediate;
		 * combined with the signed JSGT this is a sound bound —
		 * accepted.
		 */
		"bounds checks mixing signed and unsigned, variant 9",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
		BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.result = ACCEPT,
	},
	{
		/* As variant 9 but comparing against 0 (unsigned) — does
		 * not exclude large/negative values, reject.
		 */
		"bounds checks mixing signed and unsigned, variant 10",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "unbounded min value",
		.result = REJECT,
	},
	{
		/* JGE against -1 (== UINT64_MAX) always takes the branch,
		 * so the exit path below it is dead — the live path is
		 * still unbounded, reject.
		 */
		"bounds checks mixing signed and unsigned, variant 11",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, -1),
		BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
		/* Dead branch. */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "unbounded min value",
		.result = REJECT,
	},
	{
		/* As variant 11 but against -6: the fall-through path is
		 * reachable yet still has no lower bound — reject.
		 */
		"bounds checks mixing signed and unsigned, variant 12",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, -6),
		BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "unbounded min value",
		.result = REJECT,
	},
	{
		/* The unbounded value is laundered through r7 (starts at
		 * a known 1, then has r1 added) before the final signed
		 * check — reject.
		 */
		"bounds checks mixing signed and unsigned, variant 13",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, 2),
		BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
		BPF_MOV64_IMM(BPF_REG_7, 1),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "unbounded min value",
		.result = REJECT,
	},
	{
		/* Backward jumps re-enter the check sequence with state
		 * gated on skb->mark, exercising pruning across loop-like
		 * control flow — still must be rejected.
		 */
		"bounds checks mixing signed and unsigned, variant 14",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
			    offsetof(struct __sk_buff, mark)),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, -1),
		BPF_MOV64_IMM(BPF_REG_8, 2),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
		BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
		BPF_JMP_IMM(BPF_JA, 0, 0, -7),
		},
		.fixup_map_hash_8b = { 4 },
		.errstr = "unbounded min value",
		.result = REJECT,
	},
	{
		/* Adding the tainted value to the pointer before checking
		 * the result does not help — rejected for both priv and
		 * unpriv.
		 */
		"bounds checks mixing signed and unsigned, variant 15",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_MOV64_IMM(BPF_REG_2, -6),
		BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "unbounded min value",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		/* r1, r3 each bounded to [0, 0xff]; r1 - r3 can be
		 * negative, and RSH by 56 of that yields a large value —
		 * the resulting pointer may exceed the 8-byte value,
		 * so reject.
		 */
		"subtraction bounds (map value) variant 1",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
		BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
		BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
		BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "R0 max value is outside of the array range",
		.result = REJECT,
	},
	{
		/* Same setup without the shift: the possibly-negative
		 * difference is added directly, so the pointer's min
		 * value can go below the value start — reject.
		 */
		"subtraction bounds (map value) variant 2",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
		BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
		BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
		BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
		.result = REJECT,
	},
	{
		/* MOV32 zero-extends, so the subsequent 64-bit RSH by 32
		 * provably yields 0 — the pointer add is a no-op and the
		 * access is in range.
		 */
		"bounds check based on zero-extended MOV",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		/* r2 = 0x0000'0000'ffff'ffff */
		BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
		/* r2 = 0 */
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
		/* no-op */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
		/* access at offset 0 */
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		/* exit */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.result = ACCEPT
	},
	{
		/* MOV64 of 0xffffffff sign-extends to all-ones; RSH by 32
		 * leaves 0xffff'ffff, producing a far-out-of-bounds
		 * pointer — reject.
		 */
		"bounds check based on sign-extended MOV. test1",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		/* r2 = 0xffff'ffff'ffff'ffff */
		BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
		/* r2 = 0xffff'ffff */
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
		/* r0 = <oob pointer> */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
		/* access to OOB pointer */
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		/* exit */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "map_value pointer and 4294967295",
		.result = REJECT
	},
	{
		/* As test1 but shifting by 36, leaving 0xfff'ffff — still
		 * well outside the 8-byte value, reject.
		 */
		"bounds check based on sign-extended MOV. test2",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		/* r2 = 0xffff'ffff'ffff'ffff */
		BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
		/* r2 = 0xfff'ffff */
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
		/* r0 = <oob pointer> */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
		/* access to OOB pointer */
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		/* exit */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "R0 min value is outside of the array range",
		.result = REJECT
	},
	{
		/* var_off ([0,1] from the AND) + two (1<<29)-1 constant
		 * adds + insn offset 3 overflow past the 8-byte value;
		 * the error reports the computed off against value_size.
		 */
		"bounds check based on reg_off + var_off + insn_off. test1",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, mark)),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 4 },
		.errstr = "value_size=8 off=1073741825",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Larger variable component ((1<<30)-1): the rejection
		 * triggers on the pointer arithmetic itself rather than
		 * the final access.
		 */
		"bounds check based on reg_off + var_off + insn_off. test2",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
			    offsetof(struct __sk_buff, mark)),
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 4 },
		.errstr = "value 1073741823",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* The 32-bit SUB truncates a range that never crosses the
		 * 32-bit boundary, so the verifier can still prove the
		 * final RSH yields 0 — accepted.
		 */
		"bounds check after truncation of non-boundary-crossing range",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
		/* r1 = [0x00, 0xff] */
		BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
		BPF_MOV64_IMM(BPF_REG_2, 1),
		/* r2 = 0x10'0000'0000 */
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
		/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
		/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
		/* r1 = [0x00, 0xff] */
		BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
		/* r1 = 0 */
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
		/* no-op */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		/* access at offset 0 */
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		/* exit */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.result = ACCEPT
	},
	{
		/* Here the range DOES cross the 32-bit boundary before the
		 * ALU32 truncation, so afterwards the verifier cannot
		 * bound the value tightly — reject.
		 */
		"bounds check after truncation of boundary-crossing range (1)",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
		/* r1 = [0x00, 0xff] */
		BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
		/* r1 = [0xffff'ff80, 0x1'0000'007f] */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
		/* r1 = [0xffff'ff80, 0xffff'ffff] or
		 * [0x0000'0000, 0x0000'007f]
		 */
		BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
		/* r1 = [0x00, 0xff] or
		 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
		 */
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
		/* r1 = 0 or
		 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
		 */
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
		/* no-op or OOB pointer computation */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		/* potentially OOB access */
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		/* exit */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		/* not actually fully unbounded, but the bound is very high */
		.errstr = "R0 unbounded memory access",
		.result = REJECT
	},
	{
		/* Same as (1) but the truncation happens via MOV32 instead
		 * of an ALU32 op — must be rejected identically.
		 */
		"bounds check after truncation of boundary-crossing range (2)",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
		/* r1 = [0x00, 0xff] */
		BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
		/* r1 = [0xffff'ff80, 0x1'0000'007f] */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
		/* r1 = [0xffff'ff80, 0xffff'ffff] or
		 * [0x0000'0000, 0x0000'007f]
		 * difference to previous test: truncation via MOV32
		 * instead of ALU32.
		 */
		BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
		/* r1 = [0x00, 0xff] or
		 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
		 */
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
		/* r1 = 0 or
		 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
		 */
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
		/* no-op or OOB pointer computation */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		/* potentially OOB access */
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		/* exit */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		/* not actually fully unbounded, but the bound is very high */
		.errstr = "R0 unbounded memory access",
		.result = REJECT
	},
	{
		/* The values are all known constants, so 32-bit wraparound
		 * to exactly 0 is provable and the access is accepted.
		 */
		"bounds check after wrapping 32-bit addition",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
		/* r1 = 0x7fff'ffff */
		BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
		/* r1 = 0xffff'fffe */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
		/* r1 = 0 */
		BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
		/* no-op */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		/* access at offset 0 */
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		/* exit */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.result = ACCEPT
	},
	{
		/* A 32-bit shift by a count >= 32 has unpredictable
		 * semantics, so the verifier must treat the result as
		 * unknown even after the AND mask — reject.
		 */
		"bounds check after shift with oversized count operand",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		BPF_MOV64_IMM(BPF_REG_2, 32),
		BPF_MOV64_IMM(BPF_REG_1, 1),
		/* r1 = (u32)1 << (u32)32 = ? */
		BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
		/* r1 = [0x0000, 0xffff] */
		BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
		/* computes unknown pointer, potentially OOB */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		/* potentially OOB access */
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		/* exit */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "R0 max value is outside of the array range",
		.result = REJECT
	},
	{
		/* r1 may be -1 before the logical right shifts, which turn
		 * it into a huge positive value — the resulting pointer is
		 * unbounded, reject.
		 */
		"bounds check after right shift of maybe-negative number",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		/* r1 = [0x00, 0xff] */
		BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
		/* r1 = [-0x01, 0xfe] */
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
		/* r1 = 0 or 0xff'ffff'ffff'ffff */
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
		/* r1 = 0 or 0xffff'ffff'ffff */
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
		/* computes unknown pointer, potentially OOB */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		/* potentially OOB access */
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		/* exit */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "R0 unbounded memory access",
		.result = REJECT
	},
	{
		/* Single immediate add of 0x7ffffffe to a map-value pointer:
		 * off + access size would overflow signed 32 bit -> REJECT.
		 */
		"bounds check map access with off+size signed 32bit overflow. test1",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
		BPF_EXIT_INSN(),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
		BPF_JMP_A(0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "map_value pointer and 2147483646",
		.result = REJECT
	},
	{
		/* Same overflow built up from three smaller adds; the verifier
		 * must track the accumulated pointer offset -> REJECT.
		 */
		"bounds check map access with off+size signed 32bit overflow. test2",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
		BPF_EXIT_INSN(),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
		BPF_JMP_A(0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "pointer offset 1073741822",
		.result = REJECT
	},
	{
		/* Negative direction: accumulated subtraction pushes the
		 * pointer offset past the allowed negative bound -> REJECT.
		 */
		"bounds check map access with off+size signed 32bit overflow. test3",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
		BPF_EXIT_INSN(),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
		BPF_JMP_A(0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "pointer offset -1073741822",
		.result = REJECT
	},
	{
		/* Offset produced at runtime (1000000 * 1000000 = 10^12 in a
		 * register) added to the map-value pointer -> REJECT.
		 */
		"bounds check map access with off+size signed 32bit overflow. test4",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
		BPF_EXIT_INSN(),
		BPF_MOV64_IMM(BPF_REG_1, 1000000),
		BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
		BPF_JMP_A(0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "map_value pointer and 1000000000000",
		.result = REJECT
	},
	{
		/* Both paths converge with r0 holding either a loaded scalar
		 * or the frame pointer; state pruning must not confuse the
		 * two.  Privileged: ACCEPT (returns the pointer value);
		 * unprivileged: REJECT, returning an address leaks it.
		 */
		"pointer/scalar confusion in state equality check (way 1)",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
		BPF_JMP_A(1),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		BPF_JMP_A(0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.result = ACCEPT,
		.retval = POINTER_VALUE,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R0 leaks addr as return value"
	},
	{
		/* Same confusion with the branch polarity inverted (JNE and
		 * the pointer assignment on the fall-through path).
		 */
		"pointer/scalar confusion in state equality check (way 2)",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		BPF_JMP_A(1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.result = ACCEPT,
		.retval = POINTER_VALUE,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R0 leaks addr as return value"
	},
	{
		/* ctx (skb) access through a pointer with an unknown variable
		 * offset is never allowed -> REJECT.
		 */
		"variable-offset ctx access",
		.insns = {
		/* Get an unknown value */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
		/* Make it small and 4-byte aligned */
		BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
		/* add it to skb.  We now have either &skb->len or
		 * &skb->pkt_type, but we don't know which
		 */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
		/* dereference it */
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "variable ctx access var_off=(0x0; 0x4)",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_LWT_IN,
	},
	{
		/* Direct stack access at a variable offset (fp-8 or fp-4) is
		 * rejected even though both candidates lie in the filled
		 * region.
		 */
		"variable-offset stack access",
		.insns = {
		/* Fill the top 8 bytes of the stack */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		/* Get an unknown value */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
		/* Make it small and 4-byte aligned */
		BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
		/* add it to fp.  We now have either fp-4 or fp-8, but
		 * we don't know which
		 */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
		/* dereference it */
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_LWT_IN,
	},
	{
		/* Passing the variable-offset stack pointer as a helper's map
		 * key argument (indirect read) is likewise rejected.
		 */
		"indirect variable-offset stack access",
		.insns = {
		/* Fill the top 8 bytes of the stack */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		/* Get an unknown value */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
		/* Make it small and 4-byte aligned */
		BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
		/* add it to fp.  We now have either fp-4 or fp-8, but
		 * we don't know which
		 */
		BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
		/* dereference it indirectly */
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 5 },
		.errstr = "variable stack read R2",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_LWT_IN,
	},
	{
		/* Two adds of 0x7fffffff to fp: the verifier must flag the
		 * out-of-range pointer arithmetic, not let it wrap -> REJECT.
		 */
		"direct stack access with 32-bit wraparound. test1",
		.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
		BPF_MOV32_IMM(BPF_REG_0, 0),
		BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
		BPF_EXIT_INSN()
		},
		.errstr = "fp pointer and 2147483647",
		.result = REJECT
	},
	{
		/* Same pattern with 0x3fffffff increments -> REJECT. */
		"direct stack access with 32-bit wraparound. test2",
		.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
		BPF_MOV32_IMM(BPF_REG_0, 0),
		BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
		BPF_EXIT_INSN()
		},
		.errstr = "fp pointer and 1073741823",
		.result = REJECT
	},
	{
		/* 0x1fffffff twice: the sum itself exceeds the permitted
		 * pointer offset range -> REJECT.
		 */
		"direct stack access with 32-bit wraparound. test3",
		.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
		BPF_MOV32_IMM(BPF_REG_0, 0),
		BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
		BPF_EXIT_INSN()
		},
		.errstr = "fp pointer offset 1073741822",
		.result = REJECT
	},
	{
		/* r0 is only written on branch-not-taken paths; if liveness
		 * pruning wrongly screens those writes, the final exit reads
		 * an uninitialized r0 -> REJECT with "R0 !read_ok".
		 */
		"liveness pruning and write screening",
		.insns = {
		/* Get an unknown value */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
		/* branch conditions teach us nothing about R2 */
		BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_LWT_IN,
	},
	{
		/* The JSGT check bounds r1 only against MAX_ENTRIES as a
		 * signed value; state pruning must not merge the unchecked
		 * path, or the shifted index becomes an unbounded map-value
		 * offset -> REJECT.
		 */
		"varlen_map_value_access pruning",
		.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
		BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
		BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
		BPF_MOV32_IMM(BPF_REG_1, 0),
		BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_JMP_IMM(BPF_JA, 0, 0, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
			   offsetof(struct test_val, foo)),
		BPF_EXIT_INSN(),
		},
		.fixup_map_hash_48b = { 3 },
		.errstr_unpriv = "R0 leaks addr",
		.errstr = "R0 unbounded memory access",
		.result_unpriv = REJECT,
		.result = REJECT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		/* Hand-built raw insn: BPF_END is only defined for BPF_ALU
		 * (32-bit class); the BPF_ALU64 encoding (opcode 0xd7) must
		 * be rejected as unknown.
		 */
		"invalid 64-bit BPF_END",
		.insns = {
		BPF_MOV32_IMM(BPF_REG_0, 0),
		{
			.code = BPF_ALU64 | BPF_END | BPF_TO_LE,
			.dst_reg = BPF_REG_0,
			.src_reg = 0,
			.off = 0,
			.imm = 32,
		},
		BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode d7",
		.result = REJECT,
	},
	{
		/* Reading xdp_md->ingress_ifindex must be allowed; returns 1
		 * when the ifindex is >= 1.
		 */
		"XDP, using ifindex from netdev",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, ingress_ifindex)),
		BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.retval = 1,
	},
	{
		/* meta+8 bounded against data: byte load from meta is in
		 * range -> ACCEPT.
		 */
		"meta access, test1",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Load through meta-8, i.e. before the start of the meta
		 * area -> REJECT with off=-8.
		 */
		"meta access, test2",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet, off=-8",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* meta bounded only against data_end (not data): that does
		 * not make the meta area readable -> REJECT.
		 */
		"meta access, test3",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* data+8 bounded against data_end, then a load from meta:
		 * the data check does not cover meta -> REJECT.
		 */
		"meta access, test4",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* bpf_xdp_adjust_meta() invalidates previously checked packet
		 * pointers; reading through the stale r3 afterwards must fail
		 * with "R3 !read_ok".
		 */
		"meta access, test5",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
		BPF_MOV64_IMM(BPF_REG_2, -8),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_xdp_adjust_meta),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R3 !read_ok",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* meta+8 compared against data+8 instead of data: not a
		 * valid bound for reading meta -> REJECT.
		 */
		"meta access, test6",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* meta+8 correctly bounded against data itself -> ACCEPT. */
		"meta access, test7",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Largest allowed constant bound (0xFFFF) on the meta
		 * pointer -> ACCEPT.
		 */
		"meta access, test8",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* One past the 0xFFFF limit (0xFFFF + 1) -> REJECT. */
		"meta access, test9",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
		BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* A bounded scalar (from an XADD'ed stack slot, <= 100) is
		 * added to the data pointer, and meta+8 is compared against
		 * that moved pointer: comparing against data+var is not a
		 * valid bound for meta -> REJECT.
		 */
		"meta access, test10",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_IMM(BPF_REG_5, 42),
		BPF_MOV64_IMM(BPF_REG_6, 24),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
		BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Same bounded scalar added to the meta pointer itself, then
		 * meta+var+8 bounded against data -> ACCEPT.
		 */
		"meta access, test11",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_MOV64_IMM(BPF_REG_5, 42),
		BPF_MOV64_IMM(BPF_REG_6, 24),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
		BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Independent checks for both areas: data+16 vs data_end and
		 * meta+16 vs data -> ACCEPT.
		 */
		"meta access, test12",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data_meta)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
		BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
		BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Adding an offset to the ctx pointer makes it unusable for
		 * ctx field loads -> REJECT ("modified ctx ptr").
		 */
		"arithmetic ops make PTR_TO_CTX unusable",
		.insns = {
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
			      offsetof(struct __sk_buff, data) -
			      offsetof(struct __sk_buff, mark)),
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, mark)),
		BPF_EXIT_INSN(),
		},
		.errstr = "dereference of modified ctx ptr",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Subtracting two packet pointers yields a plain scalar
		 * (the packet length) -> ACCEPT, retval TEST_DATA_LEN.
		 */
		"pkt_end - pkt_start is allowed",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, data_end)),
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
		BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = TEST_DATA_LEN,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* Arithmetic on the pkt_end pointer itself (ADD 8) before the
		 * range check is forbidden -> REJECT.
		 */
		"XDP pkt read, pkt_end mangling, bad access 1",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R3 pointer arithmetic on pkt_end",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Same as above but subtracting from pkt_end -> REJECT. */
		"XDP pkt read, pkt_end mangling, bad access 2",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R3 pointer arithmetic on pkt_end",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* if (data+8 > data_end) goto exit; else read [data, data+8)
		 * via negative offset from data+8 -> ACCEPT.
		 */
		"XDP pkt read, pkt_data' > pkt_end, good access",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* 8-byte read at offset -4 from data+8 overlaps [data+4,
		 * data+12), past the proven range -> REJECT.
		 */
		"XDP pkt read, pkt_data' > pkt_end, bad access 1",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		/* Branch offset 0 makes the check a no-op; the read happens
		 * on the unproven path too -> REJECT.
		 */
		"XDP pkt read, pkt_data' > pkt_end, bad access 2",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* if (data_end > data+8) the taken branch proves the range;
		 * 4-byte read at offset -5 stays inside it -> ACCEPT.
		 */
		"XDP pkt read, pkt_end > pkt_data', good access",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		/* 8-byte read on the taken branch where only data_end >
		 * data+8 (strict) was shown; the access form used here is
		 * rejected -> REJECT.
		 */
		"XDP pkt read, pkt_end > pkt_data', bad access 1",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* The read sits on the fall-through (unproven) path ->
		 * REJECT.
		 */
		"XDP pkt read, pkt_end > pkt_data', bad access 2",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* if (data+8 < data_end) the taken branch proves the range;
		 * 4-byte read at offset -5 is inside it -> ACCEPT.
		 */
		"XDP pkt read, pkt_data' < pkt_end, good access",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		/* 8-byte read on the strict-less-than taken branch is
		 * rejected for this access form -> REJECT.
		 */
		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Read on the fall-through path where data+8 < data_end was
		 * NOT proven -> REJECT.
		 */
		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* if (data_end < data+8) exit; fall-through proves
		 * data_end >= data+8, so the 8-byte read at offset -8 is in
		 * range -> ACCEPT.
		 */
		"XDP pkt read, pkt_end < pkt_data', good access",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* 8-byte read at offset -4 reaches [data+4, data+12), past
		 * the proven 8 bytes -> REJECT.
		 */
		"XDP pkt read, pkt_end < pkt_data', bad access 1",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		/* Branch offset 0 makes the check a no-op; the unproven path
		 * reaches the read -> REJECT.
		 */
		"XDP pkt read, pkt_end < pkt_data', bad access 2",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* if (data+8 >= data_end) exit; fall-through proves
		 * data+8 < data_end, so the 4-byte read at -5 is safe ->
		 * ACCEPT.
		 */
		"XDP pkt read, pkt_data' >= pkt_end, good access",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		/* 8-byte read at -8 on the strict-less-than fall-through is
		 * rejected for this access form -> REJECT.
		 */
		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Branch offset 0: the JGE check proves nothing, so the read
		 * is reachable without bounds -> REJECT.
		 */
		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
			    offsetof(struct xdp_md, data)),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
			    offsetof(struct xdp_md, data_end)),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
  9865. {
  9866. "XDP pkt read, pkt_end >= pkt_data', good access",
  9867. .insns = {
  9868. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  9869. offsetof(struct xdp_md, data)),
  9870. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  9871. offsetof(struct xdp_md, data_end)),
  9872. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  9873. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  9874. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
  9875. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  9876. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  9877. BPF_MOV64_IMM(BPF_REG_0, 0),
  9878. BPF_EXIT_INSN(),
  9879. },
  9880. .result = ACCEPT,
  9881. .prog_type = BPF_PROG_TYPE_XDP,
  9882. },
  9883. {
  9884. "XDP pkt read, pkt_end >= pkt_data', bad access 1",
  9885. .insns = {
  9886. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  9887. offsetof(struct xdp_md, data)),
  9888. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  9889. offsetof(struct xdp_md, data_end)),
  9890. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  9891. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  9892. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
  9893. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  9894. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
  9895. BPF_MOV64_IMM(BPF_REG_0, 0),
  9896. BPF_EXIT_INSN(),
  9897. },
  9898. .errstr = "R1 offset is outside of the packet",
  9899. .result = REJECT,
  9900. .prog_type = BPF_PROG_TYPE_XDP,
  9901. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  9902. },
  9903. {
  9904. "XDP pkt read, pkt_end >= pkt_data', bad access 2",
  9905. .insns = {
  9906. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  9907. offsetof(struct xdp_md, data)),
  9908. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  9909. offsetof(struct xdp_md, data_end)),
  9910. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  9911. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  9912. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
  9913. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  9914. BPF_MOV64_IMM(BPF_REG_0, 0),
  9915. BPF_EXIT_INSN(),
  9916. },
  9917. .errstr = "R1 offset is outside of the packet",
  9918. .result = REJECT,
  9919. .prog_type = BPF_PROG_TYPE_XDP,
  9920. },
  9921. {
  9922. "XDP pkt read, pkt_data' <= pkt_end, good access",
  9923. .insns = {
  9924. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  9925. offsetof(struct xdp_md, data)),
  9926. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  9927. offsetof(struct xdp_md, data_end)),
  9928. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  9929. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  9930. BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
  9931. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  9932. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  9933. BPF_MOV64_IMM(BPF_REG_0, 0),
  9934. BPF_EXIT_INSN(),
  9935. },
  9936. .result = ACCEPT,
  9937. .prog_type = BPF_PROG_TYPE_XDP,
  9938. },
  9939. {
  9940. "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
  9941. .insns = {
  9942. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  9943. offsetof(struct xdp_md, data)),
  9944. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  9945. offsetof(struct xdp_md, data_end)),
  9946. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  9947. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  9948. BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
  9949. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  9950. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
  9951. BPF_MOV64_IMM(BPF_REG_0, 0),
  9952. BPF_EXIT_INSN(),
  9953. },
  9954. .errstr = "R1 offset is outside of the packet",
  9955. .result = REJECT,
  9956. .prog_type = BPF_PROG_TYPE_XDP,
  9957. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  9958. },
  9959. {
  9960. "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
  9961. .insns = {
  9962. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  9963. offsetof(struct xdp_md, data)),
  9964. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  9965. offsetof(struct xdp_md, data_end)),
  9966. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  9967. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  9968. BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
  9969. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  9970. BPF_MOV64_IMM(BPF_REG_0, 0),
  9971. BPF_EXIT_INSN(),
  9972. },
  9973. .errstr = "R1 offset is outside of the packet",
  9974. .result = REJECT,
  9975. .prog_type = BPF_PROG_TYPE_XDP,
  9976. },
  9977. {
  9978. "XDP pkt read, pkt_end <= pkt_data', good access",
  9979. .insns = {
  9980. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  9981. offsetof(struct xdp_md, data)),
  9982. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  9983. offsetof(struct xdp_md, data_end)),
  9984. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  9985. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  9986. BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
  9987. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
  9988. BPF_MOV64_IMM(BPF_REG_0, 0),
  9989. BPF_EXIT_INSN(),
  9990. },
  9991. .result = ACCEPT,
  9992. .prog_type = BPF_PROG_TYPE_XDP,
  9993. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  9994. },
  9995. {
  9996. "XDP pkt read, pkt_end <= pkt_data', bad access 1",
  9997. .insns = {
  9998. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  9999. offsetof(struct xdp_md, data)),
  10000. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10001. offsetof(struct xdp_md, data_end)),
  10002. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10003. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10004. BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
  10005. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10006. BPF_MOV64_IMM(BPF_REG_0, 0),
  10007. BPF_EXIT_INSN(),
  10008. },
  10009. .errstr = "R1 offset is outside of the packet",
  10010. .result = REJECT,
  10011. .prog_type = BPF_PROG_TYPE_XDP,
  10012. },
  10013. {
  10014. "XDP pkt read, pkt_end <= pkt_data', bad access 2",
  10015. .insns = {
  10016. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10017. offsetof(struct xdp_md, data)),
  10018. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10019. offsetof(struct xdp_md, data_end)),
  10020. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10021. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10022. BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
  10023. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
  10024. BPF_MOV64_IMM(BPF_REG_0, 0),
  10025. BPF_EXIT_INSN(),
  10026. },
  10027. .errstr = "R1 offset is outside of the packet",
  10028. .result = REJECT,
  10029. .prog_type = BPF_PROG_TYPE_XDP,
  10030. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  10031. },
  10032. {
  10033. "XDP pkt read, pkt_meta' > pkt_data, good access",
  10034. .insns = {
  10035. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10036. offsetof(struct xdp_md, data_meta)),
  10037. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10038. offsetof(struct xdp_md, data)),
  10039. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10040. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10041. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
  10042. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10043. BPF_MOV64_IMM(BPF_REG_0, 0),
  10044. BPF_EXIT_INSN(),
  10045. },
  10046. .result = ACCEPT,
  10047. .prog_type = BPF_PROG_TYPE_XDP,
  10048. },
  10049. {
  10050. "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
  10051. .insns = {
  10052. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10053. offsetof(struct xdp_md, data_meta)),
  10054. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10055. offsetof(struct xdp_md, data)),
  10056. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10057. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10058. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
  10059. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
  10060. BPF_MOV64_IMM(BPF_REG_0, 0),
  10061. BPF_EXIT_INSN(),
  10062. },
  10063. .errstr = "R1 offset is outside of the packet",
  10064. .result = REJECT,
  10065. .prog_type = BPF_PROG_TYPE_XDP,
  10066. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  10067. },
  10068. {
  10069. "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
  10070. .insns = {
  10071. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10072. offsetof(struct xdp_md, data_meta)),
  10073. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10074. offsetof(struct xdp_md, data)),
  10075. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10076. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10077. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
  10078. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10079. BPF_MOV64_IMM(BPF_REG_0, 0),
  10080. BPF_EXIT_INSN(),
  10081. },
  10082. .errstr = "R1 offset is outside of the packet",
  10083. .result = REJECT,
  10084. .prog_type = BPF_PROG_TYPE_XDP,
  10085. },
  10086. {
  10087. "XDP pkt read, pkt_data > pkt_meta', good access",
  10088. .insns = {
  10089. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10090. offsetof(struct xdp_md, data_meta)),
  10091. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10092. offsetof(struct xdp_md, data)),
  10093. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10094. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10095. BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
  10096. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  10097. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
  10098. BPF_MOV64_IMM(BPF_REG_0, 0),
  10099. BPF_EXIT_INSN(),
  10100. },
  10101. .result = ACCEPT,
  10102. .prog_type = BPF_PROG_TYPE_XDP,
  10103. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  10104. },
  10105. {
  10106. "XDP pkt read, pkt_data > pkt_meta', bad access 1",
  10107. .insns = {
  10108. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10109. offsetof(struct xdp_md, data_meta)),
  10110. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10111. offsetof(struct xdp_md, data)),
  10112. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10113. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10114. BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
  10115. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  10116. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10117. BPF_MOV64_IMM(BPF_REG_0, 0),
  10118. BPF_EXIT_INSN(),
  10119. },
  10120. .errstr = "R1 offset is outside of the packet",
  10121. .result = REJECT,
  10122. .prog_type = BPF_PROG_TYPE_XDP,
  10123. },
  10124. {
  10125. "XDP pkt read, pkt_data > pkt_meta', bad access 2",
  10126. .insns = {
  10127. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10128. offsetof(struct xdp_md, data_meta)),
  10129. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10130. offsetof(struct xdp_md, data)),
  10131. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10132. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10133. BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
  10134. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10135. BPF_MOV64_IMM(BPF_REG_0, 0),
  10136. BPF_EXIT_INSN(),
  10137. },
  10138. .errstr = "R1 offset is outside of the packet",
  10139. .result = REJECT,
  10140. .prog_type = BPF_PROG_TYPE_XDP,
  10141. },
  10142. {
  10143. "XDP pkt read, pkt_meta' < pkt_data, good access",
  10144. .insns = {
  10145. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10146. offsetof(struct xdp_md, data_meta)),
  10147. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10148. offsetof(struct xdp_md, data)),
  10149. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10150. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10151. BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
  10152. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  10153. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
  10154. BPF_MOV64_IMM(BPF_REG_0, 0),
  10155. BPF_EXIT_INSN(),
  10156. },
  10157. .result = ACCEPT,
  10158. .prog_type = BPF_PROG_TYPE_XDP,
  10159. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  10160. },
  10161. {
  10162. "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
  10163. .insns = {
  10164. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10165. offsetof(struct xdp_md, data_meta)),
  10166. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10167. offsetof(struct xdp_md, data)),
  10168. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10169. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10170. BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
  10171. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  10172. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10173. BPF_MOV64_IMM(BPF_REG_0, 0),
  10174. BPF_EXIT_INSN(),
  10175. },
  10176. .errstr = "R1 offset is outside of the packet",
  10177. .result = REJECT,
  10178. .prog_type = BPF_PROG_TYPE_XDP,
  10179. },
  10180. {
  10181. "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
  10182. .insns = {
  10183. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10184. offsetof(struct xdp_md, data_meta)),
  10185. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10186. offsetof(struct xdp_md, data)),
  10187. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10188. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10189. BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
  10190. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10191. BPF_MOV64_IMM(BPF_REG_0, 0),
  10192. BPF_EXIT_INSN(),
  10193. },
  10194. .errstr = "R1 offset is outside of the packet",
  10195. .result = REJECT,
  10196. .prog_type = BPF_PROG_TYPE_XDP,
  10197. },
  10198. {
  10199. "XDP pkt read, pkt_data < pkt_meta', good access",
  10200. .insns = {
  10201. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10202. offsetof(struct xdp_md, data_meta)),
  10203. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10204. offsetof(struct xdp_md, data)),
  10205. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10206. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10207. BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
  10208. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10209. BPF_MOV64_IMM(BPF_REG_0, 0),
  10210. BPF_EXIT_INSN(),
  10211. },
  10212. .result = ACCEPT,
  10213. .prog_type = BPF_PROG_TYPE_XDP,
  10214. },
  10215. {
  10216. "XDP pkt read, pkt_data < pkt_meta', bad access 1",
  10217. .insns = {
  10218. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10219. offsetof(struct xdp_md, data_meta)),
  10220. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10221. offsetof(struct xdp_md, data)),
  10222. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10223. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10224. BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
  10225. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
  10226. BPF_MOV64_IMM(BPF_REG_0, 0),
  10227. BPF_EXIT_INSN(),
  10228. },
  10229. .errstr = "R1 offset is outside of the packet",
  10230. .result = REJECT,
  10231. .prog_type = BPF_PROG_TYPE_XDP,
  10232. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  10233. },
  10234. {
  10235. "XDP pkt read, pkt_data < pkt_meta', bad access 2",
  10236. .insns = {
  10237. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10238. offsetof(struct xdp_md, data_meta)),
  10239. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10240. offsetof(struct xdp_md, data)),
  10241. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10242. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10243. BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
  10244. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10245. BPF_MOV64_IMM(BPF_REG_0, 0),
  10246. BPF_EXIT_INSN(),
  10247. },
  10248. .errstr = "R1 offset is outside of the packet",
  10249. .result = REJECT,
  10250. .prog_type = BPF_PROG_TYPE_XDP,
  10251. },
  10252. {
  10253. "XDP pkt read, pkt_meta' >= pkt_data, good access",
  10254. .insns = {
  10255. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10256. offsetof(struct xdp_md, data_meta)),
  10257. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10258. offsetof(struct xdp_md, data)),
  10259. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10260. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10261. BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
  10262. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
  10263. BPF_MOV64_IMM(BPF_REG_0, 0),
  10264. BPF_EXIT_INSN(),
  10265. },
  10266. .result = ACCEPT,
  10267. .prog_type = BPF_PROG_TYPE_XDP,
  10268. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  10269. },
  10270. {
  10271. "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
  10272. .insns = {
  10273. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10274. offsetof(struct xdp_md, data_meta)),
  10275. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10276. offsetof(struct xdp_md, data)),
  10277. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10278. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10279. BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
  10280. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10281. BPF_MOV64_IMM(BPF_REG_0, 0),
  10282. BPF_EXIT_INSN(),
  10283. },
  10284. .errstr = "R1 offset is outside of the packet",
  10285. .result = REJECT,
  10286. .prog_type = BPF_PROG_TYPE_XDP,
  10287. },
  10288. {
  10289. "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
  10290. .insns = {
  10291. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10292. offsetof(struct xdp_md, data_meta)),
  10293. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10294. offsetof(struct xdp_md, data)),
  10295. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10296. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10297. BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
  10298. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
  10299. BPF_MOV64_IMM(BPF_REG_0, 0),
  10300. BPF_EXIT_INSN(),
  10301. },
  10302. .errstr = "R1 offset is outside of the packet",
  10303. .result = REJECT,
  10304. .prog_type = BPF_PROG_TYPE_XDP,
  10305. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  10306. },
  10307. {
  10308. "XDP pkt read, pkt_data >= pkt_meta', good access",
  10309. .insns = {
  10310. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10311. offsetof(struct xdp_md, data_meta)),
  10312. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10313. offsetof(struct xdp_md, data)),
  10314. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10315. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10316. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
  10317. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  10318. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10319. BPF_MOV64_IMM(BPF_REG_0, 0),
  10320. BPF_EXIT_INSN(),
  10321. },
  10322. .result = ACCEPT,
  10323. .prog_type = BPF_PROG_TYPE_XDP,
  10324. },
  10325. {
  10326. "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
  10327. .insns = {
  10328. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10329. offsetof(struct xdp_md, data_meta)),
  10330. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10331. offsetof(struct xdp_md, data)),
  10332. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10333. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10334. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
  10335. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  10336. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
  10337. BPF_MOV64_IMM(BPF_REG_0, 0),
  10338. BPF_EXIT_INSN(),
  10339. },
  10340. .errstr = "R1 offset is outside of the packet",
  10341. .result = REJECT,
  10342. .prog_type = BPF_PROG_TYPE_XDP,
  10343. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  10344. },
  10345. {
  10346. "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
  10347. .insns = {
  10348. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10349. offsetof(struct xdp_md, data_meta)),
  10350. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10351. offsetof(struct xdp_md, data)),
  10352. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10353. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10354. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
  10355. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10356. BPF_MOV64_IMM(BPF_REG_0, 0),
  10357. BPF_EXIT_INSN(),
  10358. },
  10359. .errstr = "R1 offset is outside of the packet",
  10360. .result = REJECT,
  10361. .prog_type = BPF_PROG_TYPE_XDP,
  10362. },
  10363. {
  10364. "XDP pkt read, pkt_meta' <= pkt_data, good access",
  10365. .insns = {
  10366. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10367. offsetof(struct xdp_md, data_meta)),
  10368. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10369. offsetof(struct xdp_md, data)),
  10370. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10371. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10372. BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
  10373. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  10374. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10375. BPF_MOV64_IMM(BPF_REG_0, 0),
  10376. BPF_EXIT_INSN(),
  10377. },
  10378. .result = ACCEPT,
  10379. .prog_type = BPF_PROG_TYPE_XDP,
  10380. },
  10381. {
  10382. "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
  10383. .insns = {
  10384. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10385. offsetof(struct xdp_md, data_meta)),
  10386. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10387. offsetof(struct xdp_md, data)),
  10388. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10389. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10390. BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
  10391. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  10392. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
  10393. BPF_MOV64_IMM(BPF_REG_0, 0),
  10394. BPF_EXIT_INSN(),
  10395. },
  10396. .errstr = "R1 offset is outside of the packet",
  10397. .result = REJECT,
  10398. .prog_type = BPF_PROG_TYPE_XDP,
  10399. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  10400. },
  10401. {
  10402. "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
  10403. .insns = {
  10404. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10405. offsetof(struct xdp_md, data_meta)),
  10406. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10407. offsetof(struct xdp_md, data)),
  10408. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10409. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10410. BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
  10411. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10412. BPF_MOV64_IMM(BPF_REG_0, 0),
  10413. BPF_EXIT_INSN(),
  10414. },
  10415. .errstr = "R1 offset is outside of the packet",
  10416. .result = REJECT,
  10417. .prog_type = BPF_PROG_TYPE_XDP,
  10418. },
  10419. {
  10420. "XDP pkt read, pkt_data <= pkt_meta', good access",
  10421. .insns = {
  10422. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10423. offsetof(struct xdp_md, data_meta)),
  10424. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10425. offsetof(struct xdp_md, data)),
  10426. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10427. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10428. BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
  10429. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
  10430. BPF_MOV64_IMM(BPF_REG_0, 0),
  10431. BPF_EXIT_INSN(),
  10432. },
  10433. .result = ACCEPT,
  10434. .prog_type = BPF_PROG_TYPE_XDP,
  10435. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  10436. },
  10437. {
  10438. "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
  10439. .insns = {
  10440. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10441. offsetof(struct xdp_md, data_meta)),
  10442. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10443. offsetof(struct xdp_md, data)),
  10444. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10445. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10446. BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
  10447. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
  10448. BPF_MOV64_IMM(BPF_REG_0, 0),
  10449. BPF_EXIT_INSN(),
  10450. },
  10451. .errstr = "R1 offset is outside of the packet",
  10452. .result = REJECT,
  10453. .prog_type = BPF_PROG_TYPE_XDP,
  10454. },
  10455. {
  10456. "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
  10457. .insns = {
  10458. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  10459. offsetof(struct xdp_md, data_meta)),
  10460. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  10461. offsetof(struct xdp_md, data)),
  10462. BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
  10463. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
  10464. BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
  10465. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
  10466. BPF_MOV64_IMM(BPF_REG_0, 0),
  10467. BPF_EXIT_INSN(),
  10468. },
  10469. .errstr = "R1 offset is outside of the packet",
  10470. .result = REJECT,
  10471. .prog_type = BPF_PROG_TYPE_XDP,
  10472. .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
  10473. },
  10474. {
  10475. "check deducing bounds from const, 1",
  10476. .insns = {
  10477. BPF_MOV64_IMM(BPF_REG_0, 1),
  10478. BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
  10479. BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
  10480. BPF_EXIT_INSN(),
  10481. },
  10482. .result = REJECT,
  10483. .errstr = "R0 tried to subtract pointer from scalar",
  10484. },
  10485. {
  10486. "check deducing bounds from const, 2",
  10487. .insns = {
  10488. BPF_MOV64_IMM(BPF_REG_0, 1),
  10489. BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
  10490. BPF_EXIT_INSN(),
  10491. BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
  10492. BPF_EXIT_INSN(),
  10493. BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
  10494. BPF_EXIT_INSN(),
  10495. },
  10496. .result = ACCEPT,
  10497. .retval = 1,
  10498. },
  10499. {
  10500. "check deducing bounds from const, 3",
  10501. .insns = {
  10502. BPF_MOV64_IMM(BPF_REG_0, 0),
  10503. BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
  10504. BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
  10505. BPF_EXIT_INSN(),
  10506. },
  10507. .result = REJECT,
  10508. .errstr = "R0 tried to subtract pointer from scalar",
  10509. },
  10510. {
  10511. "check deducing bounds from const, 4",
  10512. .insns = {
  10513. BPF_MOV64_IMM(BPF_REG_0, 0),
  10514. BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
  10515. BPF_EXIT_INSN(),
  10516. BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
  10517. BPF_EXIT_INSN(),
  10518. BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
  10519. BPF_EXIT_INSN(),
  10520. },
  10521. .result = ACCEPT,
  10522. },
  10523. {
  10524. "check deducing bounds from const, 5",
  10525. .insns = {
  10526. BPF_MOV64_IMM(BPF_REG_0, 0),
  10527. BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
  10528. BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
  10529. BPF_EXIT_INSN(),
  10530. },
  10531. .result = REJECT,
  10532. .errstr = "R0 tried to subtract pointer from scalar",
  10533. },
  10534. {
  10535. "check deducing bounds from const, 6",
  10536. .insns = {
  10537. BPF_MOV64_IMM(BPF_REG_0, 0),
  10538. BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
  10539. BPF_EXIT_INSN(),
  10540. BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
  10541. BPF_EXIT_INSN(),
  10542. },
  10543. .result = REJECT,
  10544. .errstr = "R0 tried to subtract pointer from scalar",
  10545. },
  10546. {
  10547. "check deducing bounds from const, 7",
  10548. .insns = {
  10549. BPF_MOV64_IMM(BPF_REG_0, ~0),
  10550. BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
  10551. BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
  10552. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10553. offsetof(struct __sk_buff, mark)),
  10554. BPF_EXIT_INSN(),
  10555. },
  10556. .result = REJECT,
  10557. .errstr = "dereference of modified ctx ptr",
  10558. },
  10559. {
  10560. "check deducing bounds from const, 8",
  10561. .insns = {
  10562. BPF_MOV64_IMM(BPF_REG_0, ~0),
  10563. BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
  10564. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
  10565. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10566. offsetof(struct __sk_buff, mark)),
  10567. BPF_EXIT_INSN(),
  10568. },
  10569. .result = REJECT,
  10570. .errstr = "dereference of modified ctx ptr",
  10571. },
  10572. {
  10573. "check deducing bounds from const, 9",
  10574. .insns = {
  10575. BPF_MOV64_IMM(BPF_REG_0, 0),
  10576. BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
  10577. BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
  10578. BPF_EXIT_INSN(),
  10579. },
  10580. .result = REJECT,
  10581. .errstr = "R0 tried to subtract pointer from scalar",
  10582. },
  10583. {
  10584. "check deducing bounds from const, 10",
  10585. .insns = {
  10586. BPF_MOV64_IMM(BPF_REG_0, 0),
  10587. BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
  10588. /* Marks reg as unknown. */
  10589. BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
  10590. BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
  10591. BPF_EXIT_INSN(),
  10592. },
  10593. .result = REJECT,
  10594. .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
  10595. },
  10596. {
  10597. "bpf_exit with invalid return code. test1",
  10598. .insns = {
  10599. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
  10600. BPF_EXIT_INSN(),
  10601. },
  10602. .errstr = "R0 has value (0x0; 0xffffffff)",
  10603. .result = REJECT,
  10604. .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
  10605. },
  10606. {
  10607. "bpf_exit with invalid return code. test2",
  10608. .insns = {
  10609. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
  10610. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
  10611. BPF_EXIT_INSN(),
  10612. },
  10613. .result = ACCEPT,
  10614. .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
  10615. },
  10616. {
  10617. "bpf_exit with invalid return code. test3",
  10618. .insns = {
  10619. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
  10620. BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
  10621. BPF_EXIT_INSN(),
  10622. },
  10623. .errstr = "R0 has value (0x0; 0x3)",
  10624. .result = REJECT,
  10625. .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
  10626. },
  10627. {
  10628. "bpf_exit with invalid return code. test4",
  10629. .insns = {
  10630. BPF_MOV64_IMM(BPF_REG_0, 1),
  10631. BPF_EXIT_INSN(),
  10632. },
  10633. .result = ACCEPT,
  10634. .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
  10635. },
  10636. {
  10637. "bpf_exit with invalid return code. test5",
  10638. .insns = {
  10639. BPF_MOV64_IMM(BPF_REG_0, 2),
  10640. BPF_EXIT_INSN(),
  10641. },
  10642. .errstr = "R0 has value (0x2; 0x0)",
  10643. .result = REJECT,
  10644. .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
  10645. },
  10646. {
  10647. "bpf_exit with invalid return code. test6",
  10648. .insns = {
  10649. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  10650. BPF_EXIT_INSN(),
  10651. },
  10652. .errstr = "R0 is not a known value (ctx)",
  10653. .result = REJECT,
  10654. .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
  10655. },
  10656. {
  10657. "bpf_exit with invalid return code. test7",
  10658. .insns = {
  10659. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
  10660. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
  10661. BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
  10662. BPF_EXIT_INSN(),
  10663. },
  10664. .errstr = "R0 has unknown scalar value",
  10665. .result = REJECT,
  10666. .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
  10667. },
  10668. {
  10669. "calls: basic sanity",
  10670. .insns = {
  10671. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  10672. BPF_MOV64_IMM(BPF_REG_0, 1),
  10673. BPF_EXIT_INSN(),
  10674. BPF_MOV64_IMM(BPF_REG_0, 2),
  10675. BPF_EXIT_INSN(),
  10676. },
  10677. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10678. .result = ACCEPT,
  10679. },
  10680. {
  10681. "calls: not on unpriviledged",
  10682. .insns = {
  10683. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  10684. BPF_MOV64_IMM(BPF_REG_0, 1),
  10685. BPF_EXIT_INSN(),
  10686. BPF_MOV64_IMM(BPF_REG_0, 2),
  10687. BPF_EXIT_INSN(),
  10688. },
  10689. .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
  10690. .result_unpriv = REJECT,
  10691. .result = ACCEPT,
  10692. .retval = 1,
  10693. },
  10694. {
  10695. "calls: div by 0 in subprog",
  10696. .insns = {
  10697. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  10698. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
  10699. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  10700. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  10701. offsetof(struct __sk_buff, data_end)),
  10702. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  10703. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
  10704. BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
  10705. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
  10706. BPF_MOV64_IMM(BPF_REG_0, 1),
  10707. BPF_EXIT_INSN(),
  10708. BPF_MOV32_IMM(BPF_REG_2, 0),
  10709. BPF_MOV32_IMM(BPF_REG_3, 1),
  10710. BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
  10711. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10712. offsetof(struct __sk_buff, data)),
  10713. BPF_EXIT_INSN(),
  10714. },
  10715. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  10716. .result = ACCEPT,
  10717. .retval = 1,
  10718. },
  10719. {
  10720. "calls: multiple ret types in subprog 1",
  10721. .insns = {
  10722. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  10723. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
  10724. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  10725. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  10726. offsetof(struct __sk_buff, data_end)),
  10727. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  10728. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
  10729. BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
  10730. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
  10731. BPF_MOV64_IMM(BPF_REG_0, 1),
  10732. BPF_EXIT_INSN(),
  10733. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10734. offsetof(struct __sk_buff, data)),
  10735. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  10736. BPF_MOV32_IMM(BPF_REG_0, 42),
  10737. BPF_EXIT_INSN(),
  10738. },
  10739. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  10740. .result = REJECT,
  10741. .errstr = "R0 invalid mem access 'inv'",
  10742. },
  10743. {
  10744. "calls: multiple ret types in subprog 2",
  10745. .insns = {
  10746. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  10747. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
  10748. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  10749. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  10750. offsetof(struct __sk_buff, data_end)),
  10751. BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
  10752. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
  10753. BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
  10754. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
  10755. BPF_MOV64_IMM(BPF_REG_0, 1),
  10756. BPF_EXIT_INSN(),
  10757. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10758. offsetof(struct __sk_buff, data)),
  10759. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  10760. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
  10761. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  10762. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  10763. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  10764. BPF_LD_MAP_FD(BPF_REG_1, 0),
  10765. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  10766. BPF_FUNC_map_lookup_elem),
  10767. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  10768. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
  10769. offsetof(struct __sk_buff, data)),
  10770. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
  10771. BPF_EXIT_INSN(),
  10772. },
  10773. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  10774. .fixup_map_hash_8b = { 16 },
  10775. .result = REJECT,
  10776. .errstr = "R0 min value is outside of the array range",
  10777. },
  10778. {
  10779. "calls: overlapping caller/callee",
  10780. .insns = {
  10781. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
  10782. BPF_MOV64_IMM(BPF_REG_0, 1),
  10783. BPF_EXIT_INSN(),
  10784. },
  10785. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10786. .errstr = "last insn is not an exit or jmp",
  10787. .result = REJECT,
  10788. },
  10789. {
  10790. "calls: wrong recursive calls",
  10791. .insns = {
  10792. BPF_JMP_IMM(BPF_JA, 0, 0, 4),
  10793. BPF_JMP_IMM(BPF_JA, 0, 0, 4),
  10794. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
  10795. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
  10796. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
  10797. BPF_MOV64_IMM(BPF_REG_0, 1),
  10798. BPF_EXIT_INSN(),
  10799. },
  10800. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10801. .errstr = "jump out of range",
  10802. .result = REJECT,
  10803. },
  10804. {
  10805. "calls: wrong src reg",
  10806. .insns = {
  10807. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
  10808. BPF_MOV64_IMM(BPF_REG_0, 1),
  10809. BPF_EXIT_INSN(),
  10810. },
  10811. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10812. .errstr = "BPF_CALL uses reserved fields",
  10813. .result = REJECT,
  10814. },
  10815. {
  10816. "calls: wrong off value",
  10817. .insns = {
  10818. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
  10819. BPF_MOV64_IMM(BPF_REG_0, 1),
  10820. BPF_EXIT_INSN(),
  10821. BPF_MOV64_IMM(BPF_REG_0, 2),
  10822. BPF_EXIT_INSN(),
  10823. },
  10824. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10825. .errstr = "BPF_CALL uses reserved fields",
  10826. .result = REJECT,
  10827. },
  10828. {
  10829. "calls: jump back loop",
  10830. .insns = {
  10831. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
  10832. BPF_MOV64_IMM(BPF_REG_0, 1),
  10833. BPF_EXIT_INSN(),
  10834. },
  10835. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10836. .errstr = "back-edge from insn 0 to 0",
  10837. .result = REJECT,
  10838. },
  10839. {
  10840. "calls: conditional call",
  10841. .insns = {
  10842. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10843. offsetof(struct __sk_buff, mark)),
  10844. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  10845. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  10846. BPF_MOV64_IMM(BPF_REG_0, 1),
  10847. BPF_EXIT_INSN(),
  10848. BPF_MOV64_IMM(BPF_REG_0, 2),
  10849. BPF_EXIT_INSN(),
  10850. },
  10851. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10852. .errstr = "jump out of range",
  10853. .result = REJECT,
  10854. },
  10855. {
  10856. "calls: conditional call 2",
  10857. .insns = {
  10858. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10859. offsetof(struct __sk_buff, mark)),
  10860. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  10861. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
  10862. BPF_MOV64_IMM(BPF_REG_0, 1),
  10863. BPF_EXIT_INSN(),
  10864. BPF_MOV64_IMM(BPF_REG_0, 2),
  10865. BPF_EXIT_INSN(),
  10866. BPF_MOV64_IMM(BPF_REG_0, 3),
  10867. BPF_EXIT_INSN(),
  10868. },
  10869. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10870. .result = ACCEPT,
  10871. },
  10872. {
  10873. "calls: conditional call 3",
  10874. .insns = {
  10875. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10876. offsetof(struct __sk_buff, mark)),
  10877. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  10878. BPF_JMP_IMM(BPF_JA, 0, 0, 4),
  10879. BPF_MOV64_IMM(BPF_REG_0, 1),
  10880. BPF_EXIT_INSN(),
  10881. BPF_MOV64_IMM(BPF_REG_0, 1),
  10882. BPF_JMP_IMM(BPF_JA, 0, 0, -6),
  10883. BPF_MOV64_IMM(BPF_REG_0, 3),
  10884. BPF_JMP_IMM(BPF_JA, 0, 0, -6),
  10885. },
  10886. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10887. .errstr = "back-edge from insn",
  10888. .result = REJECT,
  10889. },
  10890. {
  10891. "calls: conditional call 4",
  10892. .insns = {
  10893. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10894. offsetof(struct __sk_buff, mark)),
  10895. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  10896. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
  10897. BPF_MOV64_IMM(BPF_REG_0, 1),
  10898. BPF_EXIT_INSN(),
  10899. BPF_MOV64_IMM(BPF_REG_0, 1),
  10900. BPF_JMP_IMM(BPF_JA, 0, 0, -5),
  10901. BPF_MOV64_IMM(BPF_REG_0, 3),
  10902. BPF_EXIT_INSN(),
  10903. },
  10904. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10905. .result = ACCEPT,
  10906. },
  10907. {
  10908. "calls: conditional call 5",
  10909. .insns = {
  10910. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10911. offsetof(struct __sk_buff, mark)),
  10912. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  10913. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
  10914. BPF_MOV64_IMM(BPF_REG_0, 1),
  10915. BPF_EXIT_INSN(),
  10916. BPF_MOV64_IMM(BPF_REG_0, 1),
  10917. BPF_JMP_IMM(BPF_JA, 0, 0, -6),
  10918. BPF_MOV64_IMM(BPF_REG_0, 3),
  10919. BPF_EXIT_INSN(),
  10920. },
  10921. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10922. .errstr = "back-edge from insn",
  10923. .result = REJECT,
  10924. },
  10925. {
  10926. "calls: conditional call 6",
  10927. .insns = {
  10928. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  10929. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
  10930. BPF_EXIT_INSN(),
  10931. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10932. offsetof(struct __sk_buff, mark)),
  10933. BPF_EXIT_INSN(),
  10934. },
  10935. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10936. .errstr = "back-edge from insn",
  10937. .result = REJECT,
  10938. },
  10939. {
  10940. "calls: using r0 returned by callee",
  10941. .insns = {
  10942. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  10943. BPF_EXIT_INSN(),
  10944. BPF_MOV64_IMM(BPF_REG_0, 2),
  10945. BPF_EXIT_INSN(),
  10946. },
  10947. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10948. .result = ACCEPT,
  10949. },
  10950. {
  10951. "calls: using uninit r0 from callee",
  10952. .insns = {
  10953. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  10954. BPF_EXIT_INSN(),
  10955. BPF_EXIT_INSN(),
  10956. },
  10957. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10958. .errstr = "!read_ok",
  10959. .result = REJECT,
  10960. },
  10961. {
  10962. "calls: callee is using r1",
  10963. .insns = {
  10964. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  10965. BPF_EXIT_INSN(),
  10966. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  10967. offsetof(struct __sk_buff, len)),
  10968. BPF_EXIT_INSN(),
  10969. },
  10970. .prog_type = BPF_PROG_TYPE_SCHED_ACT,
  10971. .result = ACCEPT,
  10972. .retval = TEST_DATA_LEN,
  10973. },
  10974. {
  10975. "calls: callee using args1",
  10976. .insns = {
  10977. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  10978. BPF_EXIT_INSN(),
  10979. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  10980. BPF_EXIT_INSN(),
  10981. },
  10982. .errstr_unpriv = "allowed for root only",
  10983. .result_unpriv = REJECT,
  10984. .result = ACCEPT,
  10985. .retval = POINTER_VALUE,
  10986. },
  10987. {
  10988. "calls: callee using wrong args2",
  10989. .insns = {
  10990. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  10991. BPF_EXIT_INSN(),
  10992. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  10993. BPF_EXIT_INSN(),
  10994. },
  10995. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  10996. .errstr = "R2 !read_ok",
  10997. .result = REJECT,
  10998. },
  10999. {
  11000. "calls: callee using two args",
  11001. .insns = {
  11002. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  11003. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
  11004. offsetof(struct __sk_buff, len)),
  11005. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
  11006. offsetof(struct __sk_buff, len)),
  11007. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11008. BPF_EXIT_INSN(),
  11009. BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
  11010. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
  11011. BPF_EXIT_INSN(),
  11012. },
  11013. .errstr_unpriv = "allowed for root only",
  11014. .result_unpriv = REJECT,
  11015. .result = ACCEPT,
  11016. .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
  11017. },
  11018. {
  11019. "calls: callee changing pkt pointers",
  11020. .insns = {
  11021. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
  11022. offsetof(struct xdp_md, data)),
  11023. BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
  11024. offsetof(struct xdp_md, data_end)),
  11025. BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
  11026. BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
  11027. BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
  11028. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  11029. /* clear_all_pkt_pointers() has to walk all frames
  11030. * to make sure that pkt pointers in the caller
  11031. * are cleared when callee is calling a helper that
  11032. * adjusts packet size
  11033. */
  11034. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  11035. BPF_MOV32_IMM(BPF_REG_0, 0),
  11036. BPF_EXIT_INSN(),
  11037. BPF_MOV64_IMM(BPF_REG_2, 0),
  11038. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  11039. BPF_FUNC_xdp_adjust_head),
  11040. BPF_EXIT_INSN(),
  11041. },
  11042. .result = REJECT,
  11043. .errstr = "R6 invalid mem access 'inv'",
  11044. .prog_type = BPF_PROG_TYPE_XDP,
  11045. },
  11046. {
  11047. "calls: two calls with args",
  11048. .insns = {
  11049. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11050. BPF_EXIT_INSN(),
  11051. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  11052. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
  11053. BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
  11054. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  11055. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  11056. BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
  11057. BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
  11058. BPF_EXIT_INSN(),
  11059. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  11060. offsetof(struct __sk_buff, len)),
  11061. BPF_EXIT_INSN(),
  11062. },
  11063. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  11064. .result = ACCEPT,
  11065. .retval = TEST_DATA_LEN + TEST_DATA_LEN,
  11066. },
  11067. {
  11068. "calls: calls with stack arith",
  11069. .insns = {
  11070. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  11071. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
  11072. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11073. BPF_EXIT_INSN(),
  11074. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
  11075. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11076. BPF_EXIT_INSN(),
  11077. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
  11078. BPF_MOV64_IMM(BPF_REG_0, 42),
  11079. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
  11080. BPF_EXIT_INSN(),
  11081. },
  11082. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  11083. .result = ACCEPT,
  11084. .retval = 42,
  11085. },
  11086. {
  11087. "calls: calls with misaligned stack access",
  11088. .insns = {
  11089. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  11090. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
  11091. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11092. BPF_EXIT_INSN(),
  11093. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
  11094. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11095. BPF_EXIT_INSN(),
  11096. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
  11097. BPF_MOV64_IMM(BPF_REG_0, 42),
  11098. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
  11099. BPF_EXIT_INSN(),
  11100. },
  11101. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  11102. .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
  11103. .errstr = "misaligned stack access",
  11104. .result = REJECT,
  11105. },
  11106. {
  11107. "calls: calls control flow, jump test",
  11108. .insns = {
  11109. BPF_MOV64_IMM(BPF_REG_0, 42),
  11110. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  11111. BPF_MOV64_IMM(BPF_REG_0, 43),
  11112. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  11113. BPF_JMP_IMM(BPF_JA, 0, 0, -3),
  11114. BPF_EXIT_INSN(),
  11115. },
  11116. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  11117. .result = ACCEPT,
  11118. .retval = 43,
  11119. },
  11120. {
  11121. "calls: calls control flow, jump test 2",
  11122. .insns = {
  11123. BPF_MOV64_IMM(BPF_REG_0, 42),
  11124. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  11125. BPF_MOV64_IMM(BPF_REG_0, 43),
  11126. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  11127. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
  11128. BPF_EXIT_INSN(),
  11129. },
  11130. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  11131. .errstr = "jump out of range from insn 1 to 4",
  11132. .result = REJECT,
  11133. },
  11134. {
  11135. "calls: two calls with bad jump",
  11136. .insns = {
  11137. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11138. BPF_EXIT_INSN(),
  11139. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  11140. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
  11141. BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
  11142. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  11143. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  11144. BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
  11145. BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
  11146. BPF_EXIT_INSN(),
  11147. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  11148. offsetof(struct __sk_buff, len)),
  11149. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
  11150. BPF_EXIT_INSN(),
  11151. },
  11152. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11153. .errstr = "jump out of range from insn 11 to 9",
  11154. .result = REJECT,
  11155. },
  11156. {
  11157. "calls: recursive call. test1",
  11158. .insns = {
  11159. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11160. BPF_EXIT_INSN(),
  11161. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
  11162. BPF_EXIT_INSN(),
  11163. },
  11164. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11165. .errstr = "back-edge",
  11166. .result = REJECT,
  11167. },
  11168. {
  11169. "calls: recursive call. test2",
  11170. .insns = {
  11171. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11172. BPF_EXIT_INSN(),
  11173. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
  11174. BPF_EXIT_INSN(),
  11175. },
  11176. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11177. .errstr = "back-edge",
  11178. .result = REJECT,
  11179. },
  11180. {
  11181. "calls: unreachable code",
  11182. .insns = {
  11183. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11184. BPF_EXIT_INSN(),
  11185. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11186. BPF_EXIT_INSN(),
  11187. BPF_MOV64_IMM(BPF_REG_0, 0),
  11188. BPF_EXIT_INSN(),
  11189. BPF_MOV64_IMM(BPF_REG_0, 0),
  11190. BPF_EXIT_INSN(),
  11191. },
  11192. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11193. .errstr = "unreachable insn 6",
  11194. .result = REJECT,
  11195. },
  11196. {
  11197. "calls: invalid call",
  11198. .insns = {
  11199. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11200. BPF_EXIT_INSN(),
  11201. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
  11202. BPF_EXIT_INSN(),
  11203. },
  11204. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11205. .errstr = "invalid destination",
  11206. .result = REJECT,
  11207. },
  11208. {
  11209. "calls: invalid call 2",
  11210. .insns = {
  11211. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11212. BPF_EXIT_INSN(),
  11213. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
  11214. BPF_EXIT_INSN(),
  11215. },
  11216. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11217. .errstr = "invalid destination",
  11218. .result = REJECT,
  11219. },
  11220. {
  11221. "calls: jumping across function bodies. test1",
  11222. .insns = {
  11223. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  11224. BPF_MOV64_IMM(BPF_REG_0, 0),
  11225. BPF_EXIT_INSN(),
  11226. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
  11227. BPF_EXIT_INSN(),
  11228. },
  11229. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11230. .errstr = "jump out of range",
  11231. .result = REJECT,
  11232. },
  11233. {
  11234. "calls: jumping across function bodies. test2",
  11235. .insns = {
  11236. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  11237. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  11238. BPF_MOV64_IMM(BPF_REG_0, 0),
  11239. BPF_EXIT_INSN(),
  11240. BPF_EXIT_INSN(),
  11241. },
  11242. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11243. .errstr = "jump out of range",
  11244. .result = REJECT,
  11245. },
  11246. {
  11247. "calls: call without exit",
  11248. .insns = {
  11249. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11250. BPF_EXIT_INSN(),
  11251. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11252. BPF_EXIT_INSN(),
  11253. BPF_MOV64_IMM(BPF_REG_0, 0),
  11254. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
  11255. },
  11256. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11257. .errstr = "not an exit",
  11258. .result = REJECT,
  11259. },
  11260. {
  11261. "calls: call into middle of ld_imm64",
  11262. .insns = {
  11263. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  11264. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  11265. BPF_MOV64_IMM(BPF_REG_0, 0),
  11266. BPF_EXIT_INSN(),
  11267. BPF_LD_IMM64(BPF_REG_0, 0),
  11268. BPF_EXIT_INSN(),
  11269. },
  11270. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11271. .errstr = "last insn",
  11272. .result = REJECT,
  11273. },
  11274. {
  11275. "calls: call into middle of other call",
  11276. .insns = {
  11277. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  11278. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  11279. BPF_MOV64_IMM(BPF_REG_0, 0),
  11280. BPF_EXIT_INSN(),
  11281. BPF_MOV64_IMM(BPF_REG_0, 0),
  11282. BPF_MOV64_IMM(BPF_REG_0, 0),
  11283. BPF_EXIT_INSN(),
  11284. },
  11285. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11286. .errstr = "last insn",
  11287. .result = REJECT,
  11288. },
  11289. {
  11290. "calls: ld_abs with changing ctx data in callee",
  11291. .insns = {
  11292. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  11293. BPF_LD_ABS(BPF_B, 0),
  11294. BPF_LD_ABS(BPF_H, 0),
  11295. BPF_LD_ABS(BPF_W, 0),
  11296. BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
  11297. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
  11298. BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
  11299. BPF_LD_ABS(BPF_B, 0),
  11300. BPF_LD_ABS(BPF_H, 0),
  11301. BPF_LD_ABS(BPF_W, 0),
  11302. BPF_EXIT_INSN(),
  11303. BPF_MOV64_IMM(BPF_REG_2, 1),
  11304. BPF_MOV64_IMM(BPF_REG_3, 2),
  11305. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  11306. BPF_FUNC_skb_vlan_push),
  11307. BPF_EXIT_INSN(),
  11308. },
  11309. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  11310. .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
  11311. .result = REJECT,
  11312. },
  11313. {
  11314. "calls: two calls with bad fallthrough",
  11315. .insns = {
  11316. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11317. BPF_EXIT_INSN(),
  11318. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  11319. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
  11320. BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
  11321. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  11322. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  11323. BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
  11324. BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
  11325. BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
  11326. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  11327. offsetof(struct __sk_buff, len)),
  11328. BPF_EXIT_INSN(),
  11329. },
  11330. .prog_type = BPF_PROG_TYPE_TRACEPOINT,
  11331. .errstr = "not an exit",
  11332. .result = REJECT,
  11333. },
  11334. {
  11335. "calls: two calls with stack read",
  11336. .insns = {
  11337. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  11338. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  11339. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  11340. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11341. BPF_EXIT_INSN(),
  11342. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  11343. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
  11344. BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
  11345. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  11346. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  11347. BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
  11348. BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
  11349. BPF_EXIT_INSN(),
  11350. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
  11351. BPF_EXIT_INSN(),
  11352. },
  11353. .prog_type = BPF_PROG_TYPE_XDP,
  11354. .result = ACCEPT,
  11355. },
  11356. {
  11357. "calls: two calls with stack write",
  11358. .insns = {
  11359. /* main prog */
  11360. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  11361. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  11362. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  11363. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  11364. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
  11365. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  11366. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
  11367. BPF_EXIT_INSN(),
  11368. /* subprog 1 */
  11369. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  11370. BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
  11371. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
  11372. BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
  11373. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  11374. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
  11375. BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
  11376. BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
  11377. /* write into stack frame of main prog */
  11378. BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
  11379. BPF_EXIT_INSN(),
  11380. /* subprog 2 */
  11381. /* read from stack frame of main prog */
  11382. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
  11383. BPF_EXIT_INSN(),
  11384. },
  11385. .prog_type = BPF_PROG_TYPE_XDP,
  11386. .result = ACCEPT,
  11387. },
  11388. {
  11389. "calls: stack overflow using two frames (pre-call access)",
  11390. .insns = {
  11391. /* prog 1 */
  11392. BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
  11393. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
  11394. BPF_EXIT_INSN(),
  11395. /* prog 2 */
  11396. BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
  11397. BPF_MOV64_IMM(BPF_REG_0, 0),
  11398. BPF_EXIT_INSN(),
  11399. },
  11400. .prog_type = BPF_PROG_TYPE_XDP,
  11401. .errstr = "combined stack size",
  11402. .result = REJECT,
  11403. },
  11404. {
  11405. "calls: stack overflow using two frames (post-call access)",
  11406. .insns = {
  11407. /* prog 1 */
  11408. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
  11409. BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
  11410. BPF_EXIT_INSN(),
  11411. /* prog 2 */
  11412. BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
  11413. BPF_MOV64_IMM(BPF_REG_0, 0),
  11414. BPF_EXIT_INSN(),
  11415. },
  11416. .prog_type = BPF_PROG_TYPE_XDP,
  11417. .errstr = "combined stack size",
  11418. .result = REJECT,
  11419. },
  11420. {
  11421. "calls: stack depth check using three frames. test1",
  11422. .insns = {
  11423. /* main */
  11424. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
  11425. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
  11426. BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
  11427. BPF_MOV64_IMM(BPF_REG_0, 0),
  11428. BPF_EXIT_INSN(),
  11429. /* A */
  11430. BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
  11431. BPF_EXIT_INSN(),
  11432. /* B */
  11433. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
  11434. BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
  11435. BPF_EXIT_INSN(),
  11436. },
  11437. .prog_type = BPF_PROG_TYPE_XDP,
  11438. /* stack_main=32, stack_A=256, stack_B=64
  11439. * and max(main+A, main+A+B) < 512
  11440. */
  11441. .result = ACCEPT,
  11442. },
  11443. {
  11444. "calls: stack depth check using three frames. test2",
  11445. .insns = {
  11446. /* main */
  11447. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
  11448. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
  11449. BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
  11450. BPF_MOV64_IMM(BPF_REG_0, 0),
  11451. BPF_EXIT_INSN(),
  11452. /* A */
  11453. BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
  11454. BPF_EXIT_INSN(),
  11455. /* B */
  11456. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
  11457. BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
  11458. BPF_EXIT_INSN(),
  11459. },
  11460. .prog_type = BPF_PROG_TYPE_XDP,
  11461. /* stack_main=32, stack_A=64, stack_B=256
  11462. * and max(main+A, main+A+B) < 512
  11463. */
  11464. .result = ACCEPT,
  11465. },
  11466. {
  11467. "calls: stack depth check using three frames. test3",
  11468. .insns = {
  11469. /* main */
  11470. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  11471. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
  11472. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  11473. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
  11474. BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
  11475. BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
  11476. BPF_MOV64_IMM(BPF_REG_0, 0),
  11477. BPF_EXIT_INSN(),
  11478. /* A */
  11479. BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
  11480. BPF_EXIT_INSN(),
  11481. BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
  11482. BPF_JMP_IMM(BPF_JA, 0, 0, -3),
  11483. /* B */
  11484. BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
  11485. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
  11486. BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
  11487. BPF_EXIT_INSN(),
  11488. },
  11489. .prog_type = BPF_PROG_TYPE_XDP,
  11490. /* stack_main=64, stack_A=224, stack_B=256
  11491. * and max(main+A, main+A+B) > 512
  11492. */
  11493. .errstr = "combined stack",
  11494. .result = REJECT,
  11495. },
  11496. {
  11497. "calls: stack depth check using three frames. test4",
  11498. /* void main(void) {
  11499. * func1(0);
  11500. * func1(1);
  11501. * func2(1);
  11502. * }
  11503. * void func1(int alloc_or_recurse) {
  11504. * if (alloc_or_recurse) {
  11505. * frame_pointer[-300] = 1;
  11506. * } else {
  11507. * func2(alloc_or_recurse);
  11508. * }
  11509. * }
  11510. * void func2(int alloc_or_recurse) {
  11511. * if (alloc_or_recurse) {
  11512. * frame_pointer[-300] = 1;
  11513. * }
  11514. * }
  11515. */
  11516. .insns = {
  11517. /* main */
  11518. BPF_MOV64_IMM(BPF_REG_1, 0),
  11519. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
  11520. BPF_MOV64_IMM(BPF_REG_1, 1),
  11521. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
  11522. BPF_MOV64_IMM(BPF_REG_1, 1),
  11523. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
  11524. BPF_MOV64_IMM(BPF_REG_0, 0),
  11525. BPF_EXIT_INSN(),
  11526. /* A */
  11527. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
  11528. BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
  11529. BPF_EXIT_INSN(),
  11530. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
  11531. BPF_EXIT_INSN(),
  11532. /* B */
  11533. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  11534. BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
  11535. BPF_EXIT_INSN(),
  11536. },
  11537. .prog_type = BPF_PROG_TYPE_XDP,
  11538. .result = REJECT,
  11539. .errstr = "combined stack",
  11540. },
  11541. {
  11542. "calls: stack depth check using three frames. test5",
  11543. .insns = {
  11544. /* main */
  11545. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
  11546. BPF_EXIT_INSN(),
  11547. /* A */
  11548. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
  11549. BPF_EXIT_INSN(),
  11550. /* B */
  11551. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
  11552. BPF_EXIT_INSN(),
  11553. /* C */
  11554. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
  11555. BPF_EXIT_INSN(),
  11556. /* D */
  11557. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
  11558. BPF_EXIT_INSN(),
  11559. /* E */
  11560. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
  11561. BPF_EXIT_INSN(),
  11562. /* F */
  11563. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
  11564. BPF_EXIT_INSN(),
  11565. /* G */
  11566. BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
  11567. BPF_EXIT_INSN(),
  11568. /* H */
  11569. BPF_MOV64_IMM(BPF_REG_0, 0),
  11570. BPF_EXIT_INSN(),
  11571. },
  11572. .prog_type = BPF_PROG_TYPE_XDP,
  11573. .errstr = "call stack",
  11574. .result = REJECT,
  11575. },
  11576. {
  11577. "calls: spill into caller stack frame",
  11578. .insns = {
  11579. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  11580. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  11581. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  11582. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
  11583. BPF_EXIT_INSN(),
  11584. BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
  11585. BPF_MOV64_IMM(BPF_REG_0, 0),
  11586. BPF_EXIT_INSN(),
  11587. },
  11588. .prog_type = BPF_PROG_TYPE_XDP,
  11589. .errstr = "cannot spill",
  11590. .result = REJECT,
  11591. },
  11592. {
  11593. "calls: write into caller stack frame",
  11594. .insns = {
  11595. BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  11596. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
  11597. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  11598. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  11599. BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
  11600. BPF_EXIT_INSN(),
  11601. BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
  11602. BPF_MOV64_IMM(BPF_REG_0, 0),
  11603. BPF_EXIT_INSN(),
  11604. },
  11605. .prog_type = BPF_PROG_TYPE_XDP,
  11606. .result = ACCEPT,
  11607. .retval = 42,
  11608. },
{
	/* Callee attempts to return a pointer to its own stack frame;
	 * the frame is dead after return, so the verifier must reject
	 * ("cannot return stack pointer").
	 */
	"calls: write into callee stack frame",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	/* would write through the returned callee-stack pointer */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_EXIT_INSN(),
	/* subprog: return fp-8 of its own frame */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot return stack pointer",
	.result = REJECT,
},
{
	/* Main passes fp-8 and fp-16 down two call levels; subprog 2
	 * writes through the caller-stack pointers and returns void.
	 * Accepted.
	 */
	"calls: two calls with stack write and void return",
	.insns = {
	/* main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),
	/* subprog 1: call subprog 2 once for each stack pointer */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	/* write into stack frame of main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(), /* void return */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	/* Subprog has a path that exits without setting r0; using r0
	 * after the call must be rejected ("R0 !read_ok").  Unpriv is
	 * rejected earlier because bpf-to-bpf calls are root-only.
	 */
	"calls: ambiguous return value",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	/* subprog: r0 only written on the taken branch */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for root only",
	.result_unpriv = REJECT,
	.errstr = "R0 !read_ok",
	.result = REJECT,
},
{
	/* Subprog 2 stores map_value_or_null into the main prog's
	 * stack; main null-checks before each write.  Accepted.
	 */
	"calls: two calls that return map_value",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	/* Subprog 2 returns 1 only when it stored a non-NULL map value
	 * pointer; subprog 1 gates both stack reads on that flag, so
	 * the writes are safe.  Accepted.
	 */
	"calls: two calls that return map_value with bool condition",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	/* Same as the previous test, but the second flag check is
	 * inverted (JNE 0 instead of JNE 1), so the fp-16 read can run
	 * when nothing was stored there.  Rejected: "invalid read from
	 * stack off -16+0 size 8".
	 */
	"calls: two calls that return map_value with incorrect bool check",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* buggy check: tests != 0 instead of != 1 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = REJECT,
	.errstr = "invalid read from stack off -16+0 size 8",
},
{
	/* Subprog 2 receives caller-stack pointers plus valid flags;
	 * the second write uses offset 2 with size 8 into an 8-byte
	 * map value, so it is rejected for out-of-bounds access.
	 */
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),
	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value at off 2: out of bounds for 8-byte value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
},
{
	/* Same structure as test1 but both writes use offset 0, which
	 * fits the 8-byte map value.  Accepted.
	 */
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
	BPF_EXIT_INSN(),
	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	/* Variant of test1 built from conditional jumps instead of
	 * bpf-to-bpf calls; still rejected for the off=2 size=8 write
	 * into an 8-byte map value.
	 */
	"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
	BPF_JMP_IMM(BPF_JA, 0, 0, -30),
	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value at off 2: out of bounds for 8-byte value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -8),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
},
{
	/* map_value_or_null pointers are stored into the caller's
	 * stack BEFORE the null check; the 0|1 flags correctly track
	 * validity, so the gated writes are safe.  Accepted.
	 */
	"calls: two calls that receive map_value_ptr_or_null via arg. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	/* Like test1, but subprog 2 checks arg4 == 0 and then writes
	 * through arg3 — i.e. exactly when the pointer may be NULL.
	 * Rejected: "R0 invalid mem access 'inv'".
	 */
	"calls: two calls that receive map_value_ptr_or_null via arg. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
		     BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* buggy: if arg4 == 0 do *arg3 = 0 (writes when ptr may be NULL) */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'inv'",
},
{
	/* Subprog spills an unchecked pkt_ptr into the caller's stack,
	 * then range-checks before reading it back and writing to the
	 * packet; safe within the subprog.  Accepted.
	 */
	"calls: pkt_ptr spill into caller stack",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = POINTER_VALUE,
},
{
	/* Caller dereferences the spilled pkt_ptr after the call
	 * without knowing whether the subprog's range check passed;
	 * rejected ("invalid access to packet").
	 */
	"calls: pkt_ptr spill into caller stack 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* Marking is still kept, but not in all cases safe. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
},
{
	/* Subprog returns 1 only on the range-checked path; caller
	 * gates its packet write on that return value, so the spilled
	 * pkt_ptr is only used when valid.  Accepted, retval == 1.
	 */
	"calls: pkt_ptr spill into caller stack 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Marking is still kept and safe here. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
{
	/* Same gating as test 3 but the subprog does not read the
	 * spilled pointer back itself; checks the caller still sees
	 * the range marking propagated.  Accepted, retval == 1.
	 */
	"calls: pkt_ptr spill into caller stack 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Check marking propagated. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
},
{
	/* The caller-stack slot holds ctx on one path and a checked
	 * pkt_ptr on another; rejected ("same insn cannot be used with
	 * different" pointer types).
	 */
	"calls: pkt_ptr spill into caller stack 5",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	/* slot first holds the ctx pointer */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "same insn cannot be used with different",
	.result = REJECT,
},
{
	/* Slot pre-filled with data_end (pkt_end); after the call the
	 * caller loads it and dereferences — rejected ("R4 invalid mem
	 * access").
	 */
	"calls: pkt_ptr spill into caller stack 6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	/* slot first holds data_end */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
},
{
	/* Slot pre-filled with scalar 0; caller dereferences the
	 * reloaded slot after the call — rejected ("R4 invalid mem
	 * access").
	 */
	"calls: pkt_ptr spill into caller stack 7",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	/* slot first holds scalar 0 */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),
	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
},
  12382. {
  12383. "calls: pkt_ptr spill into caller stack 8",
  12384. .insns = {
  12385. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  12386. offsetof(struct __sk_buff, data)),
  12387. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  12388. offsetof(struct __sk_buff, data_end)),
  12389. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  12390. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  12391. BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
  12392. BPF_EXIT_INSN(),
  12393. BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
  12394. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
  12395. BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
  12396. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  12397. BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
  12398. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
  12399. BPF_EXIT_INSN(),
  12400. /* subprog 1 */
  12401. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  12402. offsetof(struct __sk_buff, data)),
  12403. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  12404. offsetof(struct __sk_buff, data_end)),
  12405. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  12406. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  12407. BPF_MOV64_IMM(BPF_REG_5, 0),
  12408. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
  12409. /* spill checked pkt_ptr into stack of caller */
  12410. BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
  12411. BPF_MOV64_IMM(BPF_REG_5, 1),
  12412. /* don't read back pkt_ptr from stack here */
  12413. /* write 4 bytes into packet */
  12414. BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
  12415. BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
  12416. BPF_EXIT_INSN(),
  12417. },
  12418. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  12419. .result = ACCEPT,
  12420. },
  12421. {
  12422. "calls: pkt_ptr spill into caller stack 9",
  12423. .insns = {
  12424. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  12425. offsetof(struct __sk_buff, data)),
  12426. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  12427. offsetof(struct __sk_buff, data_end)),
  12428. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  12429. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  12430. BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
  12431. BPF_EXIT_INSN(),
  12432. BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
  12433. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
  12434. BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
  12435. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  12436. BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
  12437. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
  12438. BPF_EXIT_INSN(),
  12439. /* subprog 1 */
  12440. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  12441. offsetof(struct __sk_buff, data)),
  12442. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  12443. offsetof(struct __sk_buff, data_end)),
  12444. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  12445. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  12446. BPF_MOV64_IMM(BPF_REG_5, 0),
  12447. /* spill unchecked pkt_ptr into stack of caller */
  12448. BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
  12449. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
  12450. BPF_MOV64_IMM(BPF_REG_5, 1),
  12451. /* don't read back pkt_ptr from stack here */
  12452. /* write 4 bytes into packet */
  12453. BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
  12454. BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
  12455. BPF_EXIT_INSN(),
  12456. },
  12457. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  12458. .errstr = "invalid access to packet",
  12459. .result = REJECT,
  12460. },
	{
		/* Caller zero-inits fp-8 and passes its address in r2.
		 * Subprog 1 either returns early (ctx == 0) leaving the zero,
		 * or stores the map_lookup result (map_value_or_null) there.
		 * The caller NULL-checks the slot before storing through it,
		 * so both possibilities are safe -> ACCEPT.
		 */
		"calls: caller stack init to zero or map_value_or_null",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
			/* fetch map_value_or_null or const_zero from stack */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			/* store into map_value */
			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			/* subprog 1 */
			/* if (ctx == 0) return; */
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
			/* else bpf_map_lookup() and *(fp - 8) = r0 */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 13 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Pruning correctness: fp-8 is only zero-initialized on one
		 * branch; the map_lookup key read of fp-8 on the other path
		 * must be flagged (.errstr).
		 */
		"calls: stack init to zero and pruning",
		.insns = {
			/* first make allocated_stack 16 byte */
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
			/* now fork the execution such that the false branch
			 * of JGT insn will be verified second and it skips zero
			 * init of fp-8 stack slot. If stack liveness marking
			 * is missing live_read marks from call map_lookup
			 * processing then pruning will incorrectly assume
			 * that fp-8 stack slot was unused in the fall-through
			 * branch and will accept the program incorrectly
			 */
			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_48b = { 6 },
		.errstr = "invalid indirect read from stack off -8+0 size 8",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Two subprogs each load a different map fd into r0; the
		 * merged r0 feeds bpf_map_lookup_elem.  hash_48b and
		 * array_48b are lookup-compatible here -> ACCEPT, retval 1.
		 */
		"calls: two calls returning different map pointers for lookup (hash, array)",
		.insns = {
			/* main prog */
			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
			BPF_CALL_REL(11),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_CALL_REL(12),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			/* subprog 1 */
			BPF_LD_MAP_FD(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			/* subprog 2 */
			BPF_LD_MAP_FD(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.fixup_map_hash_48b = { 13 },
		.fixup_map_array_48b = { 16 },
		.result = ACCEPT,
		.retval = 1,
	},
	{
		/* Same shape, but one branch yields a map-in-map pointer:
		 * r0 must not be usable for a direct lookup -> REJECT
		 * "R0 invalid mem access 'map_ptr'".
		 */
		"calls: two calls returning different map pointers for lookup (hash, map in map)",
		.insns = {
			/* main prog */
			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
			BPF_CALL_REL(11),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_CALL_REL(12),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			/* subprog 1 */
			BPF_LD_MAP_FD(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			/* subprog 2 */
			BPF_LD_MAP_FD(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.fixup_map_in_map = { 16 },
		.fixup_map_array_48b = { 13 },
		.result = REJECT,
		.errstr = "R0 invalid mem access 'map_ptr'",
	},
	{
		/* Two branches load DIFFERENT prog-array fds into r2 before
		 * tail_call.  Privileged: accepted (retval 42); unprivileged:
		 * rejected as "tail_call abusing map_ptr".
		 */
		"cond: two branches returning different map pointers for lookup (tail, tail)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
			BPF_LD_MAP_FD(BPF_REG_2, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_LD_MAP_FD(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_tail_call),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup_prog1 = { 5 },
		.fixup_prog2 = { 2 },
		.result_unpriv = REJECT,
		.errstr_unpriv = "tail_call abusing map_ptr",
		.result = ACCEPT,
		.retval = 42,
	},
	{
		/* Same shape but both branches load the SAME prog-array fd;
		 * accepted for both privileged and unprivileged.
		 */
		"cond: two branches returning same map pointers for lookup (tail, tail)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
			BPF_LD_MAP_FD(BPF_REG_2, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_LD_MAP_FD(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_tail_call),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup_prog2 = { 2, 5 },
		.result_unpriv = ACCEPT,
		.result = ACCEPT,
		.retval = 42,
	},
	{
		/* All forked paths must be verified even after a state-prune
		 * candidate: on one path r6 stays an unbound scalar and the
		 * store through it must be caught (.errstr).
		 */
		"search pruning: all branches should be verified (nop operation)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_JMP_A(1),
			BPF_MOV64_IMM(BPF_REG_4, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
			BPF_MOV64_IMM(BPF_REG_6, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "R6 invalid mem access 'inv'",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* One branch writes fp-16, the other writes fp-24; the later
		 * read of fp-16 is invalid on the second path and must be
		 * reported even if the first path was verified already.
		 */
		"search pruning: all branches should be verified (invalid stack access)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
			BPF_JMP_A(1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.errstr = "invalid read from stack off -16+0 size 8",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* Runtime JIT check: 64/32-bit LSH/RSH/ARSH by 1 produce the
		 * expected values; any mismatch exits with retval 1 instead
		 * of the expected 2.
		 */
		"jit: lsh, rsh, arsh by 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 2,
	},
	{
		/* JIT ldimm64 lowering: high word of 0xfeffffffffffffff must
		 * survive, i.e. (r1 >> 32) == 0xfeffffff.
		 */
		"jit: mov32 for ldimm64, 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
			BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 2,
	},
	{
		/* JIT ldimm64 lowering: 0x1ffffffff must NOT be truncated to
		 * 0xffffffff by a mov32-style optimization.
		 */
		"jit: mov32 for ldimm64, 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
			BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 2,
	},
	{
		/* Runtime JIT check of 64-bit and 32-bit MUL with various
		 * operand/destination register combinations; each mismatch
		 * exits with retval 1, full success returns 2.
		 */
		"jit: various mul tests",
		.insns = {
			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
			BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
			BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
			BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
			BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
			BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
			BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
			BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 2,
	},
	{
		/* BPF_XADD on a word at fp-7 is misaligned -> REJECT. */
		"xadd/w check unaligned stack",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned stack access off",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* BPF_XADD at map_value + 3 is misaligned -> REJECT. */
		"xadd/w check unaligned map",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_8b = { 3 },
		.result = REJECT,
		.errstr = "misaligned value access off",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		/* BPF_XADD into packet memory is disallowed for XDP
		 * regardless of alignment -> REJECT (.errstr).
		 */
		"xadd/w check unaligned pkt",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_0, 99),
			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "BPF_XADD stores into R2 pkt is not allowed",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		/* Runtime check: double DW xadd at fp-8 must leave src (r0)
		 * and dst (r10, mirrored in r6/r7) registers unmangled and
		 * the slot equal to 1 + 1 + 1 = 3.
		 */
		"xadd/w check whether src/dst got mangled, 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 42),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.retval = 3,
	},
	{
		/* Same as above but with 32-bit (word) stores and xadds. */
		"xadd/w check whether src/dst got mangled, 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 42),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.retval = 3,
	},
	{
		/* Verifier must track bpf_get_stack()'s signed return value
		 * (sign-extended via LSH/ARSH 32) tightly enough that the
		 * derived pointer (map_value + r8) and remaining size (r9)
		 * for the second bpf_get_stack() call stay within the
		 * 48-byte map value -> ACCEPT.
		 */
		"bpf_get_stack return R0 within range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
			BPF_MOV64_IMM(BPF_REG_4, 256),
			BPF_EMIT_CALL(BPF_FUNC_get_stack),
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
			BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
			BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_EMIT_CALL(BPF_FUNC_get_stack),
			BPF_EXIT_INSN(),
		},
		.fixup_map_hash_48b = { 4 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		/* BPF_LD_ABS with a DW size is not a defined opcode. */
		"ld_abs: invalid op 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			BPF_LD_ABS(BPF_DW, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.errstr = "unknown opcode",
	},
	{
		/* BPF_LD_IND with a DW size is not a defined opcode. */
		"ld_abs: invalid op 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 256),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.errstr = "unknown opcode",
	},
	{
		/* Reduced classic-BPF-style packet parser using LD_ABS/LD_IND
		 * on ethertype 0x0806 (ARP) fields; with the supplied .data
		 * all compares match and the program returns 256.
		 */
		"ld_abs: nmap reduced",
		.insns = {
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			BPF_LD_ABS(BPF_H, 12),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
			BPF_LD_ABS(BPF_H, 12),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
			BPF_MOV32_IMM(BPF_REG_0, 18),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
			BPF_LD_IND(BPF_W, BPF_REG_7, 14),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
			BPF_MOV32_IMM(BPF_REG_0, 280971478),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
			BPF_LD_ABS(BPF_H, 12),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
			BPF_MOV32_IMM(BPF_REG_0, 22),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
			BPF_LD_IND(BPF_H, BPF_REG_7, 14),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
			BPF_MOV32_IMM(BPF_REG_0, 17366),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
			BPF_MOV32_IMM(BPF_REG_0, 256),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.data = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 256,
	},
  12979. {
  12980. "ld_abs: div + abs, test 1",
  12981. .insns = {
  12982. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  12983. BPF_LD_ABS(BPF_B, 3),
  12984. BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
  12985. BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
  12986. BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
  12987. BPF_LD_ABS(BPF_B, 4),
  12988. BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
  12989. BPF_LD_IND(BPF_B, BPF_REG_8, -70),
  12990. BPF_EXIT_INSN(),
  12991. },
  12992. .data = {
  12993. 10, 20, 30, 40, 50,
  12994. },
  12995. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  12996. .result = ACCEPT,
  12997. .retval = 10,
  12998. },
  12999. {
  13000. "ld_abs: div + abs, test 2",
  13001. .insns = {
  13002. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  13003. BPF_LD_ABS(BPF_B, 3),
  13004. BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
  13005. BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
  13006. BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
  13007. BPF_LD_ABS(BPF_B, 128),
  13008. BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
  13009. BPF_LD_IND(BPF_B, BPF_REG_8, -70),
  13010. BPF_EXIT_INSN(),
  13011. },
  13012. .data = {
  13013. 10, 20, 30, 40, 50,
  13014. },
  13015. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13016. .result = ACCEPT,
  13017. .retval = 0,
  13018. },
  13019. {
  13020. "ld_abs: div + abs, test 3",
  13021. .insns = {
  13022. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  13023. BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
  13024. BPF_LD_ABS(BPF_B, 3),
  13025. BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
  13026. BPF_EXIT_INSN(),
  13027. },
  13028. .data = {
  13029. 10, 20, 30, 40, 50,
  13030. },
  13031. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13032. .result = ACCEPT,
  13033. .retval = 0,
  13034. },
  13035. {
  13036. "ld_abs: div + abs, test 4",
  13037. .insns = {
  13038. BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
  13039. BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
  13040. BPF_LD_ABS(BPF_B, 256),
  13041. BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
  13042. BPF_EXIT_INSN(),
  13043. },
  13044. .data = {
  13045. 10, 20, 30, 40, 50,
  13046. },
  13047. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13048. .result = ACCEPT,
  13049. .retval = 0,
  13050. },
  13051. {
  13052. "ld_abs: vlan + abs, test 1",
  13053. .insns = { },
  13054. .data = {
  13055. 0x34,
  13056. },
  13057. .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
  13058. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13059. .result = ACCEPT,
  13060. .retval = 0xbef,
  13061. },
  13062. {
  13063. "ld_abs: vlan + abs, test 2",
  13064. .insns = {
  13065. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  13066. BPF_LD_ABS(BPF_B, 0),
  13067. BPF_LD_ABS(BPF_H, 0),
  13068. BPF_LD_ABS(BPF_W, 0),
  13069. BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
  13070. BPF_MOV64_IMM(BPF_REG_6, 0),
  13071. BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
  13072. BPF_MOV64_IMM(BPF_REG_2, 1),
  13073. BPF_MOV64_IMM(BPF_REG_3, 2),
  13074. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  13075. BPF_FUNC_skb_vlan_push),
  13076. BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
  13077. BPF_LD_ABS(BPF_B, 0),
  13078. BPF_LD_ABS(BPF_H, 0),
  13079. BPF_LD_ABS(BPF_W, 0),
  13080. BPF_MOV64_IMM(BPF_REG_0, 42),
  13081. BPF_EXIT_INSN(),
  13082. },
  13083. .data = {
  13084. 0x34,
  13085. },
  13086. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13087. .result = ACCEPT,
  13088. .retval = 42,
  13089. },
  13090. {
  13091. "ld_abs: jump around ld_abs",
  13092. .insns = { },
  13093. .data = {
  13094. 10, 11,
  13095. },
  13096. .fill_helper = bpf_fill_jump_around_ld_abs,
  13097. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13098. .result = ACCEPT,
  13099. .retval = 10,
  13100. },
	{
		/* Four runs of the bpf_fill_rand_ld_dw generator; each run
		 * produces a different program and expected checksum retval.
		 */
		"ld_dw: xor semi-random 64 bit imms, test 1",
		.insns = { },
		.data = { },
		.fill_helper = bpf_fill_rand_ld_dw,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 4090,
	},
	{
		"ld_dw: xor semi-random 64 bit imms, test 2",
		.insns = { },
		.data = { },
		.fill_helper = bpf_fill_rand_ld_dw,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 2047,
	},
	{
		"ld_dw: xor semi-random 64 bit imms, test 3",
		.insns = { },
		.data = { },
		.fill_helper = bpf_fill_rand_ld_dw,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 511,
	},
	{
		"ld_dw: xor semi-random 64 bit imms, test 4",
		.insns = { },
		.data = { },
		.fill_helper = bpf_fill_rand_ld_dw,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 5,
	},
	{
		/* Passing the unmodified ctx pointer (r1) straight to
		 * bpf_csum_update must be allowed -> ACCEPT.
		 */
		"pass unmodified ctx pointer to helper",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_update),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
	},
	{
		/* BPF_SK_LOOKUP acquires a socket reference; exiting without
		 * releasing it must be rejected (.errstr).
		 */
		"reference tracking: leak potential reference",
		.insns = {
			BPF_SK_LOOKUP,
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "Unreleased reference",
		.result = REJECT,
	},
	{
		/* Spilling the acquired reference to the stack does not count
		 * as releasing it -> REJECT.
		 */
		"reference tracking: leak potential reference on stack",
		.insns = {
			BPF_SK_LOOKUP,
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "Unreleased reference",
		.result = REJECT,
	},
	{
		/* Overwriting the stack slot that held the reference still
		 * leaks it -> REJECT.
		 */
		"reference tracking: leak potential reference on stack 2",
		.insns = {
			BPF_SK_LOOKUP,
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "Unreleased reference",
		.result = REJECT,
	},
	{
		/* Zeroing r0 discards the pointer but not the acquired
		 * reference -> REJECT.
		 */
		"reference tracking: zero potential reference",
		.insns = {
			BPF_SK_LOOKUP,
			BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "Unreleased reference",
		.result = REJECT,
	},
	{
		/* Copying the pointer to r7 and then zeroing both copies
		 * still leaves the reference unreleased -> REJECT.
		 */
		"reference tracking: copy and zero potential references",
		.insns = {
			BPF_SK_LOOKUP,
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "Unreleased reference",
		.result = REJECT,
	},
	{
		/* bpf_sk_release requires a non-NULL socket; releasing the
		 * unchecked sock_or_null is rejected (.errstr).
		 */
		"reference tracking: release reference without check",
		.insns = {
			BPF_SK_LOOKUP,
			/* reference in r0 may be NULL */
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_EMIT_CALL(BPF_FUNC_sk_release),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "type=sock_or_null expected=sock",
		.result = REJECT,
	},
	{
		/* NULL-check then release on the non-NULL path -> ACCEPT. */
		"reference tracking: release reference",
		.insns = {
			BPF_SK_LOOKUP,
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_EMIT_CALL(BPF_FUNC_sk_release),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
	},
	{
		/* Inverted check (JNE skips the exit), release on the
		 * non-NULL fallthrough -> ACCEPT.
		 */
		"reference tracking: release reference 2",
		.insns = {
			BPF_SK_LOOKUP,
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_EMIT_CALL(BPF_FUNC_sk_release),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
	},
	{
		/* Second sk_release of an already-released socket: r1 has
		 * become an untracked scalar -> REJECT (.errstr).
		 */
		"reference tracking: release reference twice",
		.insns = {
			BPF_SK_LOOKUP,
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_EMIT_CALL(BPF_FUNC_sk_release),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_EMIT_CALL(BPF_FUNC_sk_release),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "type=inv expected=sock",
		.result = REJECT,
	},
	{
		/* Double release confined to one branch must still be
		 * caught -> REJECT.
		 */
		"reference tracking: release reference twice inside branch",
		.insns = {
			BPF_SK_LOOKUP,
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
			BPF_EMIT_CALL(BPF_FUNC_sk_release),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_EMIT_CALL(BPF_FUNC_sk_release),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.errstr = "type=inv expected=sock",
		.result = REJECT,
	},
  13284. {
  13285. "reference tracking: alloc, check, free in one subbranch",
  13286. .insns = {
  13287. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  13288. offsetof(struct __sk_buff, data)),
  13289. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  13290. offsetof(struct __sk_buff, data_end)),
  13291. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  13292. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
  13293. /* if (offsetof(skb, mark) > data_len) exit; */
  13294. BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
  13295. BPF_EXIT_INSN(),
  13296. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
  13297. offsetof(struct __sk_buff, mark)),
  13298. BPF_SK_LOOKUP,
  13299. BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
  13300. /* Leak reference in R0 */
  13301. BPF_EXIT_INSN(),
  13302. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
  13303. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13304. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13305. BPF_EXIT_INSN(),
  13306. },
  13307. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13308. .errstr = "Unreleased reference",
  13309. .result = REJECT,
  13310. },
  13311. {
  13312. "reference tracking: alloc, check, free in both subbranches",
  13313. .insns = {
  13314. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  13315. offsetof(struct __sk_buff, data)),
  13316. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  13317. offsetof(struct __sk_buff, data_end)),
  13318. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  13319. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
  13320. /* if (offsetof(skb, mark) > data_len) exit; */
  13321. BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
  13322. BPF_EXIT_INSN(),
  13323. BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
  13324. offsetof(struct __sk_buff, mark)),
  13325. BPF_SK_LOOKUP,
  13326. BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
  13327. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
  13328. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13329. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13330. BPF_EXIT_INSN(),
  13331. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
  13332. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13333. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13334. BPF_EXIT_INSN(),
  13335. },
  13336. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13337. .result = ACCEPT,
  13338. },
  13339. {
  13340. "reference tracking in call: free reference in subprog",
  13341. .insns = {
  13342. BPF_SK_LOOKUP,
  13343. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
  13344. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  13345. BPF_MOV64_IMM(BPF_REG_0, 0),
  13346. BPF_EXIT_INSN(),
  13347. /* subprog 1 */
  13348. BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
  13349. BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
  13350. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13351. BPF_EXIT_INSN(),
  13352. },
  13353. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13354. .result = ACCEPT,
  13355. },
  13356. {
  13357. "pass modified ctx pointer to helper, 1",
  13358. .insns = {
  13359. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
  13360. BPF_MOV64_IMM(BPF_REG_2, 0),
  13361. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  13362. BPF_FUNC_csum_update),
  13363. BPF_MOV64_IMM(BPF_REG_0, 0),
  13364. BPF_EXIT_INSN(),
  13365. },
  13366. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13367. .result = REJECT,
  13368. .errstr = "dereference of modified ctx ptr",
  13369. },
  13370. {
  13371. "pass modified ctx pointer to helper, 2",
  13372. .insns = {
  13373. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
  13374. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  13375. BPF_FUNC_get_socket_cookie),
  13376. BPF_MOV64_IMM(BPF_REG_0, 0),
  13377. BPF_EXIT_INSN(),
  13378. },
  13379. .result_unpriv = REJECT,
  13380. .result = REJECT,
  13381. .errstr_unpriv = "dereference of modified ctx ptr",
  13382. .errstr = "dereference of modified ctx ptr",
  13383. },
  13384. {
  13385. "pass modified ctx pointer to helper, 3",
  13386. .insns = {
  13387. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
  13388. BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
  13389. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
  13390. BPF_MOV64_IMM(BPF_REG_2, 0),
  13391. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  13392. BPF_FUNC_csum_update),
  13393. BPF_MOV64_IMM(BPF_REG_0, 0),
  13394. BPF_EXIT_INSN(),
  13395. },
  13396. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13397. .result = REJECT,
  13398. .errstr = "variable ctx access var_off=(0x0; 0x4)",
  13399. },
  13400. {
  13401. "mov64 src == dst",
  13402. .insns = {
  13403. BPF_MOV64_IMM(BPF_REG_2, 0),
  13404. BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
  13405. // Check bounds are OK
  13406. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
  13407. BPF_MOV64_IMM(BPF_REG_0, 0),
  13408. BPF_EXIT_INSN(),
  13409. },
  13410. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13411. .result = ACCEPT,
  13412. },
  13413. {
  13414. "mov64 src != dst",
  13415. .insns = {
  13416. BPF_MOV64_IMM(BPF_REG_3, 0),
  13417. BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
  13418. // Check bounds are OK
  13419. BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
  13420. BPF_MOV64_IMM(BPF_REG_0, 0),
  13421. BPF_EXIT_INSN(),
  13422. },
  13423. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13424. .result = ACCEPT,
  13425. },
  13426. {
  13427. "reference tracking in call: free reference in subprog and outside",
  13428. .insns = {
  13429. BPF_SK_LOOKUP,
  13430. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
  13431. BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
  13432. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  13433. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  13434. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13435. BPF_EXIT_INSN(),
  13436. /* subprog 1 */
  13437. BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
  13438. BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
  13439. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13440. BPF_EXIT_INSN(),
  13441. },
  13442. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13443. .errstr = "type=inv expected=sock",
  13444. .result = REJECT,
  13445. },
  13446. {
  13447. "reference tracking in call: alloc & leak reference in subprog",
  13448. .insns = {
  13449. BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
  13450. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
  13451. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
  13452. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13453. BPF_MOV64_IMM(BPF_REG_0, 0),
  13454. BPF_EXIT_INSN(),
  13455. /* subprog 1 */
  13456. BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
  13457. BPF_SK_LOOKUP,
  13458. /* spill unchecked sk_ptr into stack of caller */
  13459. BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
  13460. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13461. BPF_EXIT_INSN(),
  13462. },
  13463. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13464. .errstr = "Unreleased reference",
  13465. .result = REJECT,
  13466. },
  13467. {
  13468. "reference tracking in call: alloc in subprog, release outside",
  13469. .insns = {
  13470. BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
  13471. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
  13472. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13473. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  13474. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13475. BPF_EXIT_INSN(),
  13476. /* subprog 1 */
  13477. BPF_SK_LOOKUP,
  13478. BPF_EXIT_INSN(), /* return sk */
  13479. },
  13480. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13481. .retval = POINTER_VALUE,
  13482. .result = ACCEPT,
  13483. },
  13484. {
  13485. "reference tracking in call: sk_ptr leak into caller stack",
  13486. .insns = {
  13487. BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
  13488. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
  13489. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  13490. BPF_MOV64_IMM(BPF_REG_0, 0),
  13491. BPF_EXIT_INSN(),
  13492. /* subprog 1 */
  13493. BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
  13494. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
  13495. BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
  13496. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
  13497. /* spill unchecked sk_ptr into stack of caller */
  13498. BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
  13499. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
  13500. BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
  13501. BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
  13502. BPF_EXIT_INSN(),
  13503. /* subprog 2 */
  13504. BPF_SK_LOOKUP,
  13505. BPF_EXIT_INSN(),
  13506. },
  13507. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13508. .errstr = "Unreleased reference",
  13509. .result = REJECT,
  13510. },
  13511. {
  13512. "reference tracking in call: sk_ptr spill into caller stack",
  13513. .insns = {
  13514. BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
  13515. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
  13516. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  13517. BPF_MOV64_IMM(BPF_REG_0, 0),
  13518. BPF_EXIT_INSN(),
  13519. /* subprog 1 */
  13520. BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
  13521. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
  13522. BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
  13523. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
  13524. /* spill unchecked sk_ptr into stack of caller */
  13525. BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
  13526. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
  13527. BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
  13528. BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
  13529. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  13530. /* now the sk_ptr is verified, free the reference */
  13531. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
  13532. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13533. BPF_EXIT_INSN(),
  13534. /* subprog 2 */
  13535. BPF_SK_LOOKUP,
  13536. BPF_EXIT_INSN(),
  13537. },
  13538. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13539. .result = ACCEPT,
  13540. },
  13541. {
  13542. "reference tracking: allow LD_ABS",
  13543. .insns = {
  13544. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  13545. BPF_SK_LOOKUP,
  13546. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13547. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  13548. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13549. BPF_LD_ABS(BPF_B, 0),
  13550. BPF_LD_ABS(BPF_H, 0),
  13551. BPF_LD_ABS(BPF_W, 0),
  13552. BPF_EXIT_INSN(),
  13553. },
  13554. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13555. .result = ACCEPT,
  13556. },
  13557. {
  13558. "reference tracking: forbid LD_ABS while holding reference",
  13559. .insns = {
  13560. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  13561. BPF_SK_LOOKUP,
  13562. BPF_LD_ABS(BPF_B, 0),
  13563. BPF_LD_ABS(BPF_H, 0),
  13564. BPF_LD_ABS(BPF_W, 0),
  13565. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13566. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  13567. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13568. BPF_EXIT_INSN(),
  13569. },
  13570. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13571. .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
  13572. .result = REJECT,
  13573. },
  13574. {
  13575. "reference tracking: allow LD_IND",
  13576. .insns = {
  13577. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  13578. BPF_SK_LOOKUP,
  13579. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13580. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  13581. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13582. BPF_MOV64_IMM(BPF_REG_7, 1),
  13583. BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
  13584. BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
  13585. BPF_EXIT_INSN(),
  13586. },
  13587. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13588. .result = ACCEPT,
  13589. .retval = 1,
  13590. },
  13591. {
  13592. "reference tracking: forbid LD_IND while holding reference",
  13593. .insns = {
  13594. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  13595. BPF_SK_LOOKUP,
  13596. BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
  13597. BPF_MOV64_IMM(BPF_REG_7, 1),
  13598. BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
  13599. BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
  13600. BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
  13601. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  13602. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13603. BPF_EXIT_INSN(),
  13604. },
  13605. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13606. .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
  13607. .result = REJECT,
  13608. },
  13609. {
  13610. "reference tracking: check reference or tail call",
  13611. .insns = {
  13612. BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
  13613. BPF_SK_LOOKUP,
  13614. /* if (sk) bpf_sk_release() */
  13615. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13616. BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
  13617. /* bpf_tail_call() */
  13618. BPF_MOV64_IMM(BPF_REG_3, 2),
  13619. BPF_LD_MAP_FD(BPF_REG_2, 0),
  13620. BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
  13621. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  13622. BPF_FUNC_tail_call),
  13623. BPF_MOV64_IMM(BPF_REG_0, 0),
  13624. BPF_EXIT_INSN(),
  13625. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13626. BPF_EXIT_INSN(),
  13627. },
  13628. .fixup_prog1 = { 17 },
  13629. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13630. .result = ACCEPT,
  13631. },
  13632. {
  13633. "reference tracking: release reference then tail call",
  13634. .insns = {
  13635. BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
  13636. BPF_SK_LOOKUP,
  13637. /* if (sk) bpf_sk_release() */
  13638. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13639. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  13640. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13641. /* bpf_tail_call() */
  13642. BPF_MOV64_IMM(BPF_REG_3, 2),
  13643. BPF_LD_MAP_FD(BPF_REG_2, 0),
  13644. BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
  13645. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  13646. BPF_FUNC_tail_call),
  13647. BPF_MOV64_IMM(BPF_REG_0, 0),
  13648. BPF_EXIT_INSN(),
  13649. },
  13650. .fixup_prog1 = { 18 },
  13651. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13652. .result = ACCEPT,
  13653. },
  13654. {
  13655. "reference tracking: leak possible reference over tail call",
  13656. .insns = {
  13657. BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
  13658. /* Look up socket and store in REG_6 */
  13659. BPF_SK_LOOKUP,
  13660. /* bpf_tail_call() */
  13661. BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
  13662. BPF_MOV64_IMM(BPF_REG_3, 2),
  13663. BPF_LD_MAP_FD(BPF_REG_2, 0),
  13664. BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
  13665. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  13666. BPF_FUNC_tail_call),
  13667. BPF_MOV64_IMM(BPF_REG_0, 0),
  13668. /* if (sk) bpf_sk_release() */
  13669. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  13670. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  13671. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13672. BPF_EXIT_INSN(),
  13673. },
  13674. .fixup_prog1 = { 16 },
  13675. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13676. .errstr = "tail_call would lead to reference leak",
  13677. .result = REJECT,
  13678. },
  13679. {
  13680. "reference tracking: leak checked reference over tail call",
  13681. .insns = {
  13682. BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
  13683. /* Look up socket and store in REG_6 */
  13684. BPF_SK_LOOKUP,
  13685. BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
  13686. /* if (!sk) goto end */
  13687. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  13688. /* bpf_tail_call() */
  13689. BPF_MOV64_IMM(BPF_REG_3, 0),
  13690. BPF_LD_MAP_FD(BPF_REG_2, 0),
  13691. BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
  13692. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  13693. BPF_FUNC_tail_call),
  13694. BPF_MOV64_IMM(BPF_REG_0, 0),
  13695. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  13696. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13697. BPF_EXIT_INSN(),
  13698. },
  13699. .fixup_prog1 = { 17 },
  13700. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13701. .errstr = "tail_call would lead to reference leak",
  13702. .result = REJECT,
  13703. },
  13704. {
  13705. "reference tracking: mangle and release sock_or_null",
  13706. .insns = {
  13707. BPF_SK_LOOKUP,
  13708. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13709. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
  13710. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  13711. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13712. BPF_EXIT_INSN(),
  13713. },
  13714. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13715. .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
  13716. .result = REJECT,
  13717. },
  13718. {
  13719. "reference tracking: mangle and release sock",
  13720. .insns = {
  13721. BPF_SK_LOOKUP,
  13722. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13723. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  13724. BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
  13725. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13726. BPF_EXIT_INSN(),
  13727. },
  13728. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13729. .errstr = "R1 pointer arithmetic on sock prohibited",
  13730. .result = REJECT,
  13731. },
  13732. {
  13733. "reference tracking: access member",
  13734. .insns = {
  13735. BPF_SK_LOOKUP,
  13736. BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
  13737. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  13738. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
  13739. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  13740. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13741. BPF_EXIT_INSN(),
  13742. },
  13743. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13744. .result = ACCEPT,
  13745. },
  13746. {
  13747. "reference tracking: write to member",
  13748. .insns = {
  13749. BPF_SK_LOOKUP,
  13750. BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
  13751. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
  13752. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  13753. BPF_LD_IMM64(BPF_REG_2, 42),
  13754. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
  13755. offsetof(struct bpf_sock, mark)),
  13756. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  13757. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13758. BPF_LD_IMM64(BPF_REG_0, 0),
  13759. BPF_EXIT_INSN(),
  13760. },
  13761. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13762. .errstr = "cannot write into socket",
  13763. .result = REJECT,
  13764. },
  13765. {
  13766. "reference tracking: invalid 64-bit access of member",
  13767. .insns = {
  13768. BPF_SK_LOOKUP,
  13769. BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
  13770. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  13771. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
  13772. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  13773. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13774. BPF_EXIT_INSN(),
  13775. },
  13776. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13777. .errstr = "invalid bpf_sock access off=0 size=8",
  13778. .result = REJECT,
  13779. },
  13780. {
  13781. "reference tracking: access after release",
  13782. .insns = {
  13783. BPF_SK_LOOKUP,
  13784. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13785. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  13786. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13787. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
  13788. BPF_EXIT_INSN(),
  13789. },
  13790. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13791. .errstr = "!read_ok",
  13792. .result = REJECT,
  13793. },
  13794. {
  13795. "reference tracking: direct access for lookup",
  13796. .insns = {
  13797. /* Check that the packet is at least 64B long */
  13798. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  13799. offsetof(struct __sk_buff, data)),
  13800. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  13801. offsetof(struct __sk_buff, data_end)),
  13802. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  13803. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
  13804. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
  13805. /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
  13806. BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
  13807. BPF_MOV64_IMM(BPF_REG_4, 0),
  13808. BPF_MOV64_IMM(BPF_REG_5, 0),
  13809. BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
  13810. BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
  13811. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
  13812. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
  13813. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  13814. BPF_EMIT_CALL(BPF_FUNC_sk_release),
  13815. BPF_EXIT_INSN(),
  13816. },
  13817. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  13818. .result = ACCEPT,
  13819. },
  13820. {
  13821. "calls: ctx read at start of subprog",
  13822. .insns = {
  13823. BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
  13824. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
  13825. BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
  13826. BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
  13827. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
  13828. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  13829. BPF_EXIT_INSN(),
  13830. BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
  13831. BPF_MOV64_IMM(BPF_REG_0, 0),
  13832. BPF_EXIT_INSN(),
  13833. },
  13834. .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
  13835. .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
  13836. .result_unpriv = REJECT,
  13837. .result = ACCEPT,
  13838. },
  13839. };
  13840. static int probe_filter_length(const struct bpf_insn *fp)
  13841. {
  13842. int len;
  13843. for (len = MAX_INSNS - 1; len > 0; --len)
  13844. if (fp[len].code != 0 || fp[len].imm != 0)
  13845. break;
  13846. return len + 1;
  13847. }
  13848. static int create_map(uint32_t type, uint32_t size_key,
  13849. uint32_t size_value, uint32_t max_elem)
  13850. {
  13851. int fd;
  13852. fd = bpf_create_map(type, size_key, size_value, max_elem,
  13853. type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
  13854. if (fd < 0)
  13855. printf("Failed to create hash map '%s'!\n", strerror(errno));
  13856. return fd;
  13857. }
  13858. static int create_prog_dummy1(enum bpf_map_type prog_type)
  13859. {
  13860. struct bpf_insn prog[] = {
  13861. BPF_MOV64_IMM(BPF_REG_0, 42),
  13862. BPF_EXIT_INSN(),
  13863. };
  13864. return bpf_load_program(prog_type, prog,
  13865. ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
  13866. }
  13867. static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
  13868. {
  13869. struct bpf_insn prog[] = {
  13870. BPF_MOV64_IMM(BPF_REG_3, idx),
  13871. BPF_LD_MAP_FD(BPF_REG_2, mfd),
  13872. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  13873. BPF_FUNC_tail_call),
  13874. BPF_MOV64_IMM(BPF_REG_0, 41),
  13875. BPF_EXIT_INSN(),
  13876. };
  13877. return bpf_load_program(prog_type, prog,
  13878. ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
  13879. }
  13880. static int create_prog_array(enum bpf_map_type prog_type, uint32_t max_elem,
  13881. int p1key)
  13882. {
  13883. int p2key = 1;
  13884. int mfd, p1fd, p2fd;
  13885. mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
  13886. sizeof(int), max_elem, 0);
  13887. if (mfd < 0) {
  13888. printf("Failed to create prog array '%s'!\n", strerror(errno));
  13889. return -1;
  13890. }
  13891. p1fd = create_prog_dummy1(prog_type);
  13892. p2fd = create_prog_dummy2(prog_type, mfd, p2key);
  13893. if (p1fd < 0 || p2fd < 0)
  13894. goto out;
  13895. if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
  13896. goto out;
  13897. if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
  13898. goto out;
  13899. close(p2fd);
  13900. close(p1fd);
  13901. return mfd;
  13902. out:
  13903. close(p2fd);
  13904. close(p1fd);
  13905. close(mfd);
  13906. return -1;
  13907. }
  13908. static int create_map_in_map(void)
  13909. {
  13910. int inner_map_fd, outer_map_fd;
  13911. inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
  13912. sizeof(int), 1, 0);
  13913. if (inner_map_fd < 0) {
  13914. printf("Failed to create array '%s'!\n", strerror(errno));
  13915. return inner_map_fd;
  13916. }
  13917. outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
  13918. sizeof(int), inner_map_fd, 1, 0);
  13919. if (outer_map_fd < 0)
  13920. printf("Failed to create array of maps '%s'!\n",
  13921. strerror(errno));
  13922. close(inner_map_fd);
  13923. return outer_map_fd;
  13924. }
  13925. static int create_cgroup_storage(bool percpu)
  13926. {
  13927. enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
  13928. BPF_MAP_TYPE_CGROUP_STORAGE;
  13929. int fd;
  13930. fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
  13931. TEST_DATA_LEN, 0, 0);
  13932. if (fd < 0)
  13933. printf("Failed to create cgroup storage '%s'!\n",
  13934. strerror(errno));
  13935. return fd;
  13936. }
/* Buffer receiving the kernel verifier log.  Sized UINT_MAX >> 8
 * (~16 MiB) so that even very verbose rejections of long test
 * programs fit without truncation.
 */
static char bpf_vlog[UINT_MAX >> 8];
  13938. static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
  13939. struct bpf_insn *prog, int *map_fds)
  13940. {
  13941. int *fixup_map_hash_8b = test->fixup_map_hash_8b;
  13942. int *fixup_map_hash_48b = test->fixup_map_hash_48b;
  13943. int *fixup_map_hash_16b = test->fixup_map_hash_16b;
  13944. int *fixup_map_array_48b = test->fixup_map_array_48b;
  13945. int *fixup_map_sockmap = test->fixup_map_sockmap;
  13946. int *fixup_map_sockhash = test->fixup_map_sockhash;
  13947. int *fixup_map_xskmap = test->fixup_map_xskmap;
  13948. int *fixup_map_stacktrace = test->fixup_map_stacktrace;
  13949. int *fixup_prog1 = test->fixup_prog1;
  13950. int *fixup_prog2 = test->fixup_prog2;
  13951. int *fixup_map_in_map = test->fixup_map_in_map;
  13952. int *fixup_cgroup_storage = test->fixup_cgroup_storage;
  13953. int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
  13954. if (test->fill_helper)
  13955. test->fill_helper(test);
  13956. /* Allocating HTs with 1 elem is fine here, since we only test
  13957. * for verifier and not do a runtime lookup, so the only thing
  13958. * that really matters is value size in this case.
  13959. */
  13960. if (*fixup_map_hash_8b) {
  13961. map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
  13962. sizeof(long long), 1);
  13963. do {
  13964. prog[*fixup_map_hash_8b].imm = map_fds[0];
  13965. fixup_map_hash_8b++;
  13966. } while (*fixup_map_hash_8b);
  13967. }
  13968. if (*fixup_map_hash_48b) {
  13969. map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
  13970. sizeof(struct test_val), 1);
  13971. do {
  13972. prog[*fixup_map_hash_48b].imm = map_fds[1];
  13973. fixup_map_hash_48b++;
  13974. } while (*fixup_map_hash_48b);
  13975. }
  13976. if (*fixup_map_hash_16b) {
  13977. map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
  13978. sizeof(struct other_val), 1);
  13979. do {
  13980. prog[*fixup_map_hash_16b].imm = map_fds[2];
  13981. fixup_map_hash_16b++;
  13982. } while (*fixup_map_hash_16b);
  13983. }
  13984. if (*fixup_map_array_48b) {
  13985. map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
  13986. sizeof(struct test_val), 1);
  13987. do {
  13988. prog[*fixup_map_array_48b].imm = map_fds[3];
  13989. fixup_map_array_48b++;
  13990. } while (*fixup_map_array_48b);
  13991. }
  13992. if (*fixup_prog1) {
  13993. map_fds[4] = create_prog_array(prog_type, 4, 0);
  13994. do {
  13995. prog[*fixup_prog1].imm = map_fds[4];
  13996. fixup_prog1++;
  13997. } while (*fixup_prog1);
  13998. }
  13999. if (*fixup_prog2) {
  14000. map_fds[5] = create_prog_array(prog_type, 8, 7);
  14001. do {
  14002. prog[*fixup_prog2].imm = map_fds[5];
  14003. fixup_prog2++;
  14004. } while (*fixup_prog2);
  14005. }
  14006. if (*fixup_map_in_map) {
  14007. map_fds[6] = create_map_in_map();
  14008. do {
  14009. prog[*fixup_map_in_map].imm = map_fds[6];
  14010. fixup_map_in_map++;
  14011. } while (*fixup_map_in_map);
  14012. }
  14013. if (*fixup_cgroup_storage) {
  14014. map_fds[7] = create_cgroup_storage(false);
  14015. do {
  14016. prog[*fixup_cgroup_storage].imm = map_fds[7];
  14017. fixup_cgroup_storage++;
  14018. } while (*fixup_cgroup_storage);
  14019. }
  14020. if (*fixup_percpu_cgroup_storage) {
  14021. map_fds[8] = create_cgroup_storage(true);
  14022. do {
  14023. prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
  14024. fixup_percpu_cgroup_storage++;
  14025. } while (*fixup_percpu_cgroup_storage);
  14026. }
  14027. if (*fixup_map_sockmap) {
  14028. map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
  14029. sizeof(int), 1);
  14030. do {
  14031. prog[*fixup_map_sockmap].imm = map_fds[9];
  14032. fixup_map_sockmap++;
  14033. } while (*fixup_map_sockmap);
  14034. }
  14035. if (*fixup_map_sockhash) {
  14036. map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
  14037. sizeof(int), 1);
  14038. do {
  14039. prog[*fixup_map_sockhash].imm = map_fds[10];
  14040. fixup_map_sockhash++;
  14041. } while (*fixup_map_sockhash);
  14042. }
  14043. if (*fixup_map_xskmap) {
  14044. map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
  14045. sizeof(int), 1);
  14046. do {
  14047. prog[*fixup_map_xskmap].imm = map_fds[11];
  14048. fixup_map_xskmap++;
  14049. } while (*fixup_map_xskmap);
  14050. }
  14051. if (*fixup_map_stacktrace) {
  14052. map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
  14053. sizeof(u64), 1);
  14054. do {
  14055. prog[*fixup_map_stacktrace].imm = map_fds[12];
  14056. fixup_map_stacktrace++;
  14057. } while (fixup_map_stacktrace);
  14058. }
  14059. }
  14060. static int set_admin(bool admin)
  14061. {
  14062. cap_t caps;
  14063. const cap_value_t cap_val = CAP_SYS_ADMIN;
  14064. int ret = -1;
  14065. caps = cap_get_proc();
  14066. if (!caps) {
  14067. perror("cap_get_proc");
  14068. return -1;
  14069. }
  14070. if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
  14071. admin ? CAP_SET : CAP_CLEAR)) {
  14072. perror("cap_set_flag");
  14073. goto out;
  14074. }
  14075. if (cap_set_proc(caps)) {
  14076. perror("cap_set_proc");
  14077. goto out;
  14078. }
  14079. ret = 0;
  14080. out:
  14081. if (cap_free(caps))
  14082. perror("cap_free");
  14083. return ret;
  14084. }
/* Run one test case end to end:
 *  1. resolve map/prog-array fixups into the insns (do_test_fixup),
 *  2. submit the program to the kernel verifier (bpf_verify_program),
 *  3. compare accept/reject and the verifier log against the test's
 *     expectations (with unprivileged overrides when running unpriv),
 *  4. if the program loaded, execute it once with bpf_prog_test_run()
 *     and check its return value.
 * Tallies the outcome into *passes / *errors and closes all fds it
 * created before returning.
 */
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, reject_from_alignment;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	uint32_t expected_val;
	uint32_t retval;
	int i, err;

	/* Unused fixup slots stay -1 so the blanket close loop below is
	 * harmless for them.
	 */
	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	/* prog_type 0 (UNSPEC) means the test did not specify one. */
	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	do_test_fixup(test, prog_type, prog, map_fds);
	prog_len = probe_filter_length(prog);

	fd_prog = bpf_verify_program(prog_type, prog, prog_len,
				     test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);

	/* The *_unpriv expectations override the defaults only when the
	 * test actually provides them.
	 */
	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;
	expected_val = unpriv && test->retval_unpriv ?
		       test->retval_unpriv : test->retval;

	/* A rejection counts as alignment-related only when the test
	 * opted in via F_NEEDS_EFFICIENT_UNALIGNED_ACCESS and the
	 * verifier log mentions a misaligned access.
	 */
	reject_from_alignment = fd_prog < 0 &&
				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
				strstr(bpf_vlog, "misaligned");
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* On arches with efficient unaligned access the verifier must
	 * never reject for alignment.
	 */
	if (reject_from_alignment) {
		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
		       strerror(errno));
		goto fail_log;
	}
#endif
	if (expected_ret == ACCEPT) {
		if (fd_prog < 0 && !reject_from_alignment) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			       expected_err, bpf_vlog);
			goto fail_log;
		}
	}

	if (fd_prog >= 0) {
		__u8 tmp[TEST_DATA_LEN << 2];
		__u32 size_tmp = sizeof(tmp);

		/* Test runs require CAP_SYS_ADMIN; temporarily re-raise
		 * it around the run when exercising the unpriv path.
		 */
		if (unpriv)
			set_admin(true);
		err = bpf_prog_test_run(fd_prog, 1, test->data,
					sizeof(test->data), tmp, &size_tmp,
					&retval, NULL);
		if (unpriv)
			set_admin(false);
		/* 524 is the kernel-internal ENOTSUPP (not exported to
		 * userspace headers): the prog type cannot be test-run.
		 * EPERM may occur for unprivileged runs.  Both are
		 * tolerated here.
		 */
		if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
			printf("Unexpected bpf_prog_test_run error\n");
			goto fail_log;
		}
		/* POINTER_VALUE stands for "some kernel pointer", whose
		 * exact value cannot be predicted, so skip the compare.
		 */
		if (!err && retval != expected_val &&
		    expected_val != POINTER_VALUE) {
			printf("FAIL retval %d != %d\n", retval, expected_val);
			goto fail_log;
		}
	}
	(*passes)++;
	printf("OK%s\n", reject_from_alignment ?
	       " (NOTE: reject due to unknown alignment)" : "");
close_fds:
	/* fd_prog may be -1 here; close(-1) fails harmlessly. */
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}
  14172. static bool is_admin(void)
  14173. {
  14174. cap_t caps;
  14175. cap_flag_value_t sysadmin = CAP_CLEAR;
  14176. const cap_value_t cap_val = CAP_SYS_ADMIN;
  14177. #ifdef CAP_IS_SUPPORTED
  14178. if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
  14179. perror("cap_get_flag");
  14180. return false;
  14181. }
  14182. #endif
  14183. caps = cap_get_proc();
  14184. if (!caps) {
  14185. perror("cap_get_proc");
  14186. return false;
  14187. }
  14188. if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
  14189. perror("cap_get_flag");
  14190. if (cap_free(caps))
  14191. perror("cap_free");
  14192. return (sysadmin == CAP_SET);
  14193. }
  14194. static void get_unpriv_disabled()
  14195. {
  14196. char buf[2];
  14197. FILE *fd;
  14198. fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
  14199. if (!fd) {
  14200. perror("fopen /proc/sys/"UNPRIV_SYSCTL);
  14201. unpriv_disabled = true;
  14202. return;
  14203. }
  14204. if (fgets(buf, 2, fd) == buf && atoi(buf))
  14205. unpriv_disabled = true;
  14206. fclose(fd);
  14207. }
  14208. static bool test_as_unpriv(struct bpf_test *test)
  14209. {
  14210. return !test->prog_type ||
  14211. test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
  14212. test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
  14213. }
  14214. static int do_test(bool unpriv, unsigned int from, unsigned int to)
  14215. {
  14216. int i, passes = 0, errors = 0, skips = 0;
  14217. for (i = from; i < to; i++) {
  14218. struct bpf_test *test = &tests[i];
  14219. /* Program types that are not supported by non-root we
  14220. * skip right away.
  14221. */
  14222. if (test_as_unpriv(test) && unpriv_disabled) {
  14223. printf("#%d/u %s SKIP\n", i, test->descr);
  14224. skips++;
  14225. } else if (test_as_unpriv(test)) {
  14226. if (!unpriv)
  14227. set_admin(false);
  14228. printf("#%d/u %s ", i, test->descr);
  14229. do_test_single(test, true, &passes, &errors);
  14230. if (!unpriv)
  14231. set_admin(true);
  14232. }
  14233. if (unpriv) {
  14234. printf("#%d/p %s SKIP\n", i, test->descr);
  14235. skips++;
  14236. } else {
  14237. printf("#%d/p %s ", i, test->descr);
  14238. do_test_single(test, false, &passes, &errors);
  14239. }
  14240. }
  14241. printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
  14242. skips, errors);
  14243. return errors ? EXIT_FAILURE : EXIT_SUCCESS;
  14244. }
  14245. int main(int argc, char **argv)
  14246. {
  14247. unsigned int from = 0, to = ARRAY_SIZE(tests);
  14248. bool unpriv = !is_admin();
  14249. if (argc == 3) {
  14250. unsigned int l = atoi(argv[argc - 2]);
  14251. unsigned int u = atoi(argv[argc - 1]);
  14252. if (l < to && u < to) {
  14253. from = l;
  14254. to = u + 1;
  14255. }
  14256. } else if (argc == 2) {
  14257. unsigned int t = atoi(argv[argc - 1]);
  14258. if (t < to) {
  14259. from = t;
  14260. to = t + 1;
  14261. }
  14262. }
  14263. get_unpriv_disabled();
  14264. if (unpriv && unpriv_disabled) {
  14265. printf("Cannot run as unprivileged user with sysctl %s.\n",
  14266. UNPRIV_SYSCTL);
  14267. return EXIT_FAILURE;
  14268. }
  14269. bpf_semi_rand_init();
  14270. return do_test(unpriv, from, to);
  14271. }