/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>

#include <sys/capability.h>
#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif

#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	4

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	/* Instruction indices whose imm field the harness patches with
	 * the fd of a map created at test time: a plain hash map, a hash
	 * map holding a struct test_val, a prog array, and a map-in-map.
	 */
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	/* Substrings expected in the verifier log for privileged and
	 * unprivileged loads, respectively.
	 */
	const char *errstr;
	const char *errstr_unpriv;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
};
/* Note we want this to be 64 bit aligned so that the end of our array
 * is actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};
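
/*
 * Not part of the upstream file: a minimal sketch of how one table entry
 * can be handed to the in-kernel verifier, assuming the bpf_load_program()
 * helper declared in the included <bpf/bpf.h>.  The real harness also
 * patches the fixup_* indices with live map fds before loading; that step
 * is omitted here.
 */
static int load_one(const struct bpf_test *test)
{
	char log[4096] = {};
	int len;

	/* Probe the program length: scan back over the zero padding that
	 * follows the last real instruction in the fixed-size array.
	 */
	for (len = MAX_INSNS - 1; len > 0; len--)
		if (test->insns[len].code != 0 || test->insns[len].imm != 0)
			break;
	len++;

	/* Returns a prog fd on success, -1 if the verifier rejected the
	 * program; the verifier log (matched against errstr) lands in log[].
	 */
	return bpf_load_program(test->prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				test->insns, len, "GPL", 0, log, sizeof(log));
}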
static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test6 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test7 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test8 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "uses reserved fields",
		.result = REJECT,
	},
	{
		"test9 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 1, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test10 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test11 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test12 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"test13 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
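	/*
	 * Note on the ld_imm64 tests above: BPF_LD_IMM64() expands to two
	 * struct bpf_insn slots.  The first (BPF_LD | BPF_IMM | BPF_DW)
	 * carries the low 32 bits of the immediate; the second is a pseudo
	 * instruction with code 0 carrying the high 32 bits, and all of its
	 * other fields must stay zero.  test6/test7 build a valid pair by
	 * hand and pass; the rest truncate the pair or put junk in reserved
	 * fields and must be rejected.
	 */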
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
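	/*
	 * The three loop tests above all expect REJECT: this verifier
	 * requires the control flow graph to be a DAG, so any back-edge,
	 * unconditional or conditional, fails the initial CFG check before
	 * the instructions are even simulated.
	 */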
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit in all branches",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func unknown#1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 2 },
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"invalid fp arithmetic",
		/* If this gets ever changed, make sure JITs can deal with it. */
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer arithmetic",
		.result_unpriv = REJECT,
		.errstr = "R1 invalid mem access",
		.result = REJECT,
	},
	{
		"non-invalid fp arithmetic",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"invalid argument register",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 !read_ok",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"non-invalid argument register",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
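	/*
	 * Why the pair above differs: a helper call clobbers R1-R5 and
	 * leaves them unreadable, so the second call in "invalid argument
	 * register" sees an uninitialized R1.  R6-R9 are preserved across
	 * calls, hence parking the context in R6 and restoring it before
	 * the second call makes the sequence legal.
	 */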
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
			/* should be able to access R0 = *(R2 + 8) */
			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R0 leaks addr",
		.result = ACCEPT,
		.result_unpriv = REJECT,
	},
	{
		"check valid spill/fill, skb mark",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = ACCEPT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.errstr = "corrupted spill",
		.result = REJECT,
	},
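	/*
	 * Spill/fill background for the tests above: the verifier only
	 * tracks a spilled register's type for full-width (BPF_DW),
	 * 8-byte aligned stack stores.  Overwriting any byte of such a
	 * slot, as the BPF_B store at fp-7 does, invalidates the saved
	 * pointer, so the subsequent fill is rejected.
	 */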
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
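	/*
	 * bpf_map_lookup_elem() returns either NULL or a pointer to the
	 * value, so R0 has type map_value_or_null after the call.  It must
	 * be NULL-checked before it may be dereferenced, which is exactly
	 * what the test above skips.
	 */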
  626. {
  627. "access memory with incorrect alignment",
  628. .insns = {
  629. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  630. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  631. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  632. BPF_LD_MAP_FD(BPF_REG_1, 0),
  633. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  634. BPF_FUNC_map_lookup_elem),
  635. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
  636. BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
  637. BPF_EXIT_INSN(),
  638. },
  639. .fixup_map1 = { 3 },
  640. .errstr = "misaligned access",
  641. .result = REJECT,
  642. },
  643. {
  644. "sometimes access memory with incorrect alignment",
  645. .insns = {
  646. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  647. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  648. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  649. BPF_LD_MAP_FD(BPF_REG_1, 0),
  650. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  651. BPF_FUNC_map_lookup_elem),
  652. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
  653. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
  654. BPF_EXIT_INSN(),
  655. BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
  656. BPF_EXIT_INSN(),
  657. },
  658. .fixup_map1 = { 3 },
  659. .errstr = "R0 invalid mem access",
  660. .errstr_unpriv = "R0 leaks addr",
  661. .result = REJECT,
  662. },
  663. {
  664. "jump test 1",
  665. .insns = {
  666. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  667. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
  668. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
  669. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  670. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
  671. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
  672. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
  673. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
  674. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
  675. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
  676. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
  677. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
  678. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  679. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
  680. BPF_MOV64_IMM(BPF_REG_0, 0),
  681. BPF_EXIT_INSN(),
  682. },
  683. .errstr_unpriv = "R1 pointer comparison",
  684. .result_unpriv = REJECT,
  685. .result = ACCEPT,
  686. },
  687. {
  688. "jump test 2",
  689. .insns = {
  690. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  691. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
  692. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  693. BPF_JMP_IMM(BPF_JA, 0, 0, 14),
  694. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
  695. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  696. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  697. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
  698. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  699. BPF_JMP_IMM(BPF_JA, 0, 0, 8),
  700. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
  701. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  702. BPF_JMP_IMM(BPF_JA, 0, 0, 5),
  703. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
  704. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  705. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  706. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
  707. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  708. BPF_MOV64_IMM(BPF_REG_0, 0),
  709. BPF_EXIT_INSN(),
  710. },
  711. .errstr_unpriv = "R1 pointer comparison",
  712. .result_unpriv = REJECT,
  713. .result = ACCEPT,
  714. },
  715. {
  716. "jump test 3",
  717. .insns = {
  718. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  719. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
  720. BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
  721. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  722. BPF_JMP_IMM(BPF_JA, 0, 0, 19),
  723. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
  724. BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
  725. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
  726. BPF_JMP_IMM(BPF_JA, 0, 0, 15),
  727. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
  728. BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
  729. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
  730. BPF_JMP_IMM(BPF_JA, 0, 0, 11),
  731. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
  732. BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
  733. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
  734. BPF_JMP_IMM(BPF_JA, 0, 0, 7),
  735. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
  736. BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
  737. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
  738. BPF_JMP_IMM(BPF_JA, 0, 0, 3),
  739. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
  740. BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
  741. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
  742. BPF_LD_MAP_FD(BPF_REG_1, 0),
  743. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  744. BPF_FUNC_map_delete_elem),
  745. BPF_EXIT_INSN(),
  746. },
  747. .fixup_map1 = { 24 },
  748. .errstr_unpriv = "R1 pointer comparison",
  749. .result_unpriv = REJECT,
  750. .result = ACCEPT,
  751. },
  752. {
  753. "jump test 4",
  754. .insns = {
  755. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  756. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  757. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  758. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  759. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  760. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  761. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  762. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  763. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  764. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  765. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  766. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  767. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  768. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  769. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  770. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  771. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  772. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  773. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  774. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  775. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  776. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  777. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  778. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  779. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  780. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  781. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  782. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  783. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  784. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  785. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  786. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  787. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
  788. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
  789. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
  790. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
  791. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  792. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  793. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  794. BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
  795. BPF_MOV64_IMM(BPF_REG_0, 0),
  796. BPF_EXIT_INSN(),
  797. },
  798. .errstr_unpriv = "R1 pointer comparison",
  799. .result_unpriv = REJECT,
  800. .result = ACCEPT,
  801. },
  802. {
  803. "jump test 5",
  804. .insns = {
  805. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  806. BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
  807. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  808. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  809. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  810. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  811. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  812. BPF_MOV64_IMM(BPF_REG_0, 0),
  813. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  814. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  815. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  816. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  817. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  818. BPF_MOV64_IMM(BPF_REG_0, 0),
  819. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  820. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  821. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  822. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  823. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  824. BPF_MOV64_IMM(BPF_REG_0, 0),
  825. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  826. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  827. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  828. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  829. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  830. BPF_MOV64_IMM(BPF_REG_0, 0),
  831. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  832. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
  833. BPF_JMP_IMM(BPF_JA, 0, 0, 2),
  834. BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
  835. BPF_JMP_IMM(BPF_JA, 0, 0, 0),
  836. BPF_MOV64_IMM(BPF_REG_0, 0),
  837. BPF_EXIT_INSN(),
  838. },
  839. .errstr_unpriv = "R1 pointer comparison",
  840. .result_unpriv = REJECT,
  841. .result = ACCEPT,
  842. },
  843. {
  844. "access skb fields ok",
  845. .insns = {
  846. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  847. offsetof(struct __sk_buff, len)),
  848. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  849. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  850. offsetof(struct __sk_buff, mark)),
  851. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  852. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  853. offsetof(struct __sk_buff, pkt_type)),
  854. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
  855. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  856. offsetof(struct __sk_buff, queue_mapping)),
  857. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  858. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  859. offsetof(struct __sk_buff, protocol)),
  860. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  861. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  862. offsetof(struct __sk_buff, vlan_present)),
  863. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  864. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  865. offsetof(struct __sk_buff, vlan_tci)),
  866. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  867. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  868. offsetof(struct __sk_buff, napi_id)),
  869. BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
  870. BPF_EXIT_INSN(),
  871. },
  872. .result = ACCEPT,
  873. },
  874. {
  875. "access skb fields bad1",
  876. .insns = {
  877. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
  878. BPF_EXIT_INSN(),
  879. },
  880. .errstr = "invalid bpf_context access",
  881. .result = REJECT,
  882. },
  883. {
  884. "access skb fields bad2",
  885. .insns = {
  886. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
  887. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  888. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  889. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  890. BPF_LD_MAP_FD(BPF_REG_1, 0),
  891. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  892. BPF_FUNC_map_lookup_elem),
  893. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  894. BPF_EXIT_INSN(),
  895. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  896. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  897. offsetof(struct __sk_buff, pkt_type)),
  898. BPF_EXIT_INSN(),
  899. },
  900. .fixup_map1 = { 4 },
  901. .errstr = "different pointers",
  902. .errstr_unpriv = "R1 pointer comparison",
  903. .result = REJECT,
  904. },
  905. {
  906. "access skb fields bad3",
  907. .insns = {
  908. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
  909. BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
  910. offsetof(struct __sk_buff, pkt_type)),
  911. BPF_EXIT_INSN(),
  912. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  913. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  914. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  915. BPF_LD_MAP_FD(BPF_REG_1, 0),
  916. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  917. BPF_FUNC_map_lookup_elem),
  918. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  919. BPF_EXIT_INSN(),
  920. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  921. BPF_JMP_IMM(BPF_JA, 0, 0, -12),
  922. },
  923. .fixup_map1 = { 6 },
  924. .errstr = "different pointers",
  925. .errstr_unpriv = "R1 pointer comparison",
  926. .result = REJECT,
  927. },
  928. {
  929. "access skb fields bad4",
  930. .insns = {
  931. BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
  932. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  933. offsetof(struct __sk_buff, len)),
  934. BPF_MOV64_IMM(BPF_REG_0, 0),
  935. BPF_EXIT_INSN(),
  936. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  937. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  938. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  939. BPF_LD_MAP_FD(BPF_REG_1, 0),
  940. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  941. BPF_FUNC_map_lookup_elem),
  942. BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
  943. BPF_EXIT_INSN(),
  944. BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
  945. BPF_JMP_IMM(BPF_JA, 0, 0, -13),
  946. },
  947. .fixup_map1 = { 7 },
  948. .errstr = "different pointers",
  949. .errstr_unpriv = "R1 pointer comparison",
  950. .result = REJECT,
  951. },
  952. {
  953. "check skb->mark is not writeable by sockets",
  954. .insns = {
  955. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  956. offsetof(struct __sk_buff, mark)),
  957. BPF_EXIT_INSN(),
  958. },
  959. .errstr = "invalid bpf_context access",
  960. .errstr_unpriv = "R1 leaks addr",
  961. .result = REJECT,
  962. },
  963. {
  964. "check skb->tc_index is not writeable by sockets",
  965. .insns = {
  966. BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  967. offsetof(struct __sk_buff, tc_index)),
  968. BPF_EXIT_INSN(),
  969. },
  970. .errstr = "invalid bpf_context access",
  971. .errstr_unpriv = "R1 leaks addr",
  972. .result = REJECT,
  973. },
	{
		"check cb access: byte",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[4])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
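	/* skb->hash is read-only from BPF programs, so stores of any width
	 * must be rejected.  Narrow loads of the field are accepted only
	 * at its least-significant end, hence the __LITTLE_ENDIAN
	 * conditionals in the "load permitted" tests below.
	 */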
	{
		"__sk_buff->hash, offset 0, byte store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, hash)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"__sk_buff->tc_index, offset 3, byte store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, tc_index) + 3),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash)),
#else
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash) + 3),
#endif
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check skb->hash byte load not permitted 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load not permitted 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load not permitted 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash) + 3),
#else
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash)),
#endif
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
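	/* The "wrong type" tests replay an otherwise valid cb[] access as
	 * a BPF_PROG_TYPE_CGROUP_SOCK program, whose context is not an
	 * skb, so the same access must now be rejected.
	 */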
	{
		"check cb access: byte, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check cb access: half",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[4])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
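	/* Context accesses wider than a byte must be naturally aligned;
	 * the "unaligned" variants below expect "misaligned access".
	 */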
	{
		"check cb access: half, unaligned",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check __sk_buff->hash, offset 0, half store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, hash)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check __sk_buff->tc_index, offset 2, half store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, tc_index) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash half load permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash)),
#else
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash) + 2),
#endif
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check skb->hash half load not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash) + 2),
#else
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, hash)),
#endif
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: half, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check cb access: word",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: word, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: word, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: word, unaligned 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: word, unaligned 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: double",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: double, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[1])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: double, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"check cb access: double, oob 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, oob 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check __sk_buff->ifindex dw store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, ifindex)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check __sk_buff->ifindex dw load not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, ifindex)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check out of range skb->cb access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[0]) + 256),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
	},
	{
		"write skb fields from socket prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[4])),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, tc_index)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.errstr_unpriv = "R1 leaks addr",
		.result_unpriv = REJECT,
	},
	{
		"write skb fields from tc_cls_act prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, tc_index)),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
			    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
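	/* PTR_TO_STACK accesses are relative to the read-only frame
	 * pointer R10.  A BPF_DW store at (R10 - 10) + 2 covers stack
	 * bytes [-8, 0) and is accepted; anything misaligned or outside
	 * [-MAX_BPF_STACK, 0) is rejected with the off/size values quoted
	 * in the errstr fields below.
	 */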
	{
		"PTR_TO_STACK store/load",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"PTR_TO_STACK store/load - bad alignment on off",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -6 size 8",
	},
	{
		"PTR_TO_STACK store/load - bad alignment on reg",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "misaligned access off -2 size 8",
	},
	{
		"PTR_TO_STACK store/load - out of bounds low",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=-79992 size=8",
	},
	{
		"PTR_TO_STACK store/load - out of bounds high",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack off=0 size=8",
	},
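	/* The unpriv tests are run both with and without privileges:
	 * .result/.errstr describe the privileged expectation, while
	 * .result_unpriv/.errstr_unpriv cover the unprivileged run, where
	 * pointer arithmetic, pointer comparisons and pointer leaks are
	 * additionally forbidden.
	 */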
	{
		"unpriv: return pointer",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R0 leaks addr",
	},
	{
		"unpriv: add const to pointer",
		.insns = {
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer arithmetic",
	},
	{
		"unpriv: add pointer to pointer",
		.insns = {
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer arithmetic",
	},
	{
		"unpriv: neg pointer",
		.insns = {
			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer arithmetic",
	},
	{
		"unpriv: cmp pointer with const",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R1 pointer comparison",
	},
	{
		"unpriv: cmp pointer with pointer",
		.insns = {
			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = REJECT,
		.errstr_unpriv = "R10 pointer comparison",
	},
	{
		"unpriv: check that printk is disallowed",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_2, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_trace_printk),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "unknown func bpf_trace_printk#6",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: pass pointer to helper function",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_map_update_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R4 leaks addr",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: indirectly pass pointer on stack to helper function",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "invalid indirect read from stack off -8+0 size 8",
		.result = REJECT,
	},
	{
		"unpriv: mangle pointer on stack 1",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: mangle pointer on stack 2",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: read pointer from stack in small chunks",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid size",
		.result = REJECT,
	},
	{
		"unpriv: write pointer into ctx",
		.insns = {
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 leaks addr",
		.result_unpriv = REJECT,
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
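	/* Spilling the ctx pointer to the stack and filling it back must
	 * preserve its type (ctx, ctx 2).  Re-spilling a different pointer
	 * (ctx 3) or modifying the slot with xadd (ctx 4) changes the
	 * filled type, so the following helper call must be rejected.
	 */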
	{
		"unpriv: spill/fill of ctx",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unpriv: spill/fill of ctx 2",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_get_hash_recalc),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"unpriv: spill/fill of ctx 3",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_get_hash_recalc),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R1 type=fp expected=ctx",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"unpriv: spill/fill of ctx 4",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
			    BPF_REG_0, -8, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_get_hash_recalc),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R1 type=inv expected=ctx",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
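	/* A single ldx/stx instruction can be reached with different
	 * pointer types in the same register depending on the path taken.
	 * Since ctx accesses are rewritten per pointer type, one insn
	 * cannot serve both, and the verifier must reject such programs.
	 */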
	{
		"unpriv: spill/fill of different pointers stx",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 42),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
			    offsetof(struct __sk_buff, mark)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "same insn cannot be used with different pointers",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"unpriv: spill/fill of different pointers ldx",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
			    -(__s32)offsetof(struct bpf_perf_event_data,
					     sample_period) - 8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
			    offsetof(struct bpf_perf_event_data,
				     sample_period)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "same insn cannot be used with different pointers",
		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
	},
	{
		"unpriv: write pointer into map elem value",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 leaks addr",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: partial copy of pointer",
		.insns = {
			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R10 partial copy",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: pass pointer to tail_call",
		.insns = {
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
			BPF_LD_MAP_FD(BPF_REG_2, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_tail_call),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_prog = { 1 },
		.errstr_unpriv = "R3 leaks addr into helper",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: cmp map pointer with zero",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 1 },
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: write into frame pointer",
		.insns = {
			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "frame pointer is read only",
		.result = REJECT,
	},
	{
		"unpriv: spill/fill frame pointer",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "frame pointer is read only",
		.result = REJECT,
	},
	{
		"unpriv: cmp of frame pointer",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R10 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"unpriv: adding of fp",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "pointer arithmetic prohibited",
		.result_unpriv = REJECT,
		.errstr = "R1 invalid mem access",
		.result = REJECT,
	},
	{
		"unpriv: cmp of stack pointer",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R2 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"stack pointer arithmetic",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 4),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
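	/* bpf_skb_load_bytes() writes into a caller-supplied stack buffer,
	 * so the buffer does not have to be initialized first, but it must
	 * lie entirely within the stack and have a positive, bounded
	 * length.
	 */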
	{
		"raw_stack: no skb_load_bytes",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			/* Call to skb_load_bytes() omitted. */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid read from stack off -8+0 size 8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, negative len",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, -8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, negative len 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, ~0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, zero len",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, no init",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, init",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, spilled regs around bounds",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
			    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
			    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, spilled regs corruption",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
			    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R0 invalid mem access 'inv'",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, spilled regs corruption 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
			    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
			    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
			    offsetof(struct __sk_buff, pkt_type)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R3 invalid mem access 'inv'",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, spilled regs + data",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
			    offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
			    offsetof(struct __sk_buff, priority)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-513 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=8",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=-1",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-1 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 5",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=2147483647",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, invalid access 6",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid stack type R3 off=-512 access_size=0",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"raw_stack: skb_load_bytes, large access",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_4, 512),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			    BPF_FUNC_skb_load_bytes),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
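	/* Direct packet access: loads of skb->data/data_end yield packet
	 * pointers that may only be dereferenced after a bounds check
	 * against data_end.  Roughly, the pattern these tests encode is:
	 *
	 *	void *data = (void *)(long)skb->data;
	 *	void *data_end = (void *)(long)skb->data_end;
	 *
	 *	if (data + 8 > data_end)
	 *		return 0;
	 *	return *(u8 *)data;
	 */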
  2278. {
  2279. "direct packet access: test1",
  2280. .insns = {
  2281. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2282. offsetof(struct __sk_buff, data)),
  2283. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2284. offsetof(struct __sk_buff, data_end)),
  2285. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2286. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2287. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  2288. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2289. BPF_MOV64_IMM(BPF_REG_0, 0),
  2290. BPF_EXIT_INSN(),
  2291. },
  2292. .result = ACCEPT,
  2293. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2294. },
  2295. {
  2296. "direct packet access: test2",
  2297. .insns = {
  2298. BPF_MOV64_IMM(BPF_REG_0, 1),
  2299. BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
  2300. offsetof(struct __sk_buff, data_end)),
  2301. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2302. offsetof(struct __sk_buff, data)),
  2303. BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
  2304. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
  2305. BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
  2306. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
  2307. BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
  2308. BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
  2309. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2310. offsetof(struct __sk_buff, data)),
  2311. BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
  2312. BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
  2313. BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
  2314. BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
  2315. BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
  2316. BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
  2317. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
  2318. BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
  2319. offsetof(struct __sk_buff, data_end)),
  2320. BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
  2321. BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
  2322. BPF_MOV64_IMM(BPF_REG_0, 0),
  2323. BPF_EXIT_INSN(),
  2324. },
  2325. .result = ACCEPT,
  2326. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2327. },
  2328. {
  2329. "direct packet access: test3",
  2330. .insns = {
  2331. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2332. offsetof(struct __sk_buff, data)),
  2333. BPF_MOV64_IMM(BPF_REG_0, 0),
  2334. BPF_EXIT_INSN(),
  2335. },
  2336. .errstr = "invalid bpf_context access off=76",
  2337. .result = REJECT,
  2338. .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
  2339. },
  2340. {
  2341. "direct packet access: test4 (write)",
  2342. .insns = {
  2343. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2344. offsetof(struct __sk_buff, data)),
  2345. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2346. offsetof(struct __sk_buff, data_end)),
  2347. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2348. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2349. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  2350. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  2351. BPF_MOV64_IMM(BPF_REG_0, 0),
  2352. BPF_EXIT_INSN(),
  2353. },
  2354. .result = ACCEPT,
  2355. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2356. },
  2357. {
  2358. "direct packet access: test5 (pkt_end >= reg, good access)",
  2359. .insns = {
  2360. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2361. offsetof(struct __sk_buff, data)),
  2362. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2363. offsetof(struct __sk_buff, data_end)),
  2364. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2365. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2366. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
  2367. BPF_MOV64_IMM(BPF_REG_0, 1),
  2368. BPF_EXIT_INSN(),
  2369. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2370. BPF_MOV64_IMM(BPF_REG_0, 0),
  2371. BPF_EXIT_INSN(),
  2372. },
  2373. .result = ACCEPT,
  2374. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2375. },
  2376. {
  2377. "direct packet access: test6 (pkt_end >= reg, bad access)",
  2378. .insns = {
  2379. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2380. offsetof(struct __sk_buff, data)),
  2381. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2382. offsetof(struct __sk_buff, data_end)),
  2383. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2384. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2385. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
  2386. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2387. BPF_MOV64_IMM(BPF_REG_0, 1),
  2388. BPF_EXIT_INSN(),
  2389. BPF_MOV64_IMM(BPF_REG_0, 0),
  2390. BPF_EXIT_INSN(),
  2391. },
  2392. .errstr = "invalid access to packet",
  2393. .result = REJECT,
  2394. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2395. },
  2396. {
  2397. "direct packet access: test7 (pkt_end >= reg, both accesses)",
  2398. .insns = {
  2399. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2400. offsetof(struct __sk_buff, data)),
  2401. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2402. offsetof(struct __sk_buff, data_end)),
  2403. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2404. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2405. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
  2406. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2407. BPF_MOV64_IMM(BPF_REG_0, 1),
  2408. BPF_EXIT_INSN(),
  2409. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2410. BPF_MOV64_IMM(BPF_REG_0, 0),
  2411. BPF_EXIT_INSN(),
  2412. },
  2413. .errstr = "invalid access to packet",
  2414. .result = REJECT,
  2415. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2416. },
  2417. {
  2418. "direct packet access: test8 (double test, variant 1)",
  2419. .insns = {
  2420. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2421. offsetof(struct __sk_buff, data)),
  2422. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2423. offsetof(struct __sk_buff, data_end)),
  2424. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2425. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2426. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
  2427. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  2428. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2429. BPF_MOV64_IMM(BPF_REG_0, 1),
  2430. BPF_EXIT_INSN(),
  2431. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2432. BPF_MOV64_IMM(BPF_REG_0, 0),
  2433. BPF_EXIT_INSN(),
  2434. },
  2435. .result = ACCEPT,
  2436. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2437. },
  2438. {
  2439. "direct packet access: test9 (double test, variant 2)",
  2440. .insns = {
  2441. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2442. offsetof(struct __sk_buff, data)),
  2443. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2444. offsetof(struct __sk_buff, data_end)),
  2445. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2446. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2447. BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
  2448. BPF_MOV64_IMM(BPF_REG_0, 1),
  2449. BPF_EXIT_INSN(),
  2450. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  2451. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2452. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
  2453. BPF_MOV64_IMM(BPF_REG_0, 0),
  2454. BPF_EXIT_INSN(),
  2455. },
  2456. .result = ACCEPT,
  2457. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2458. },
  2459. {
  2460. "direct packet access: test10 (write invalid)",
  2461. .insns = {
  2462. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2463. offsetof(struct __sk_buff, data)),
  2464. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2465. offsetof(struct __sk_buff, data_end)),
  2466. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2467. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2468. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
  2469. BPF_MOV64_IMM(BPF_REG_0, 0),
  2470. BPF_EXIT_INSN(),
  2471. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  2472. BPF_MOV64_IMM(BPF_REG_0, 0),
  2473. BPF_EXIT_INSN(),
  2474. },
  2475. .errstr = "invalid access to packet",
  2476. .result = REJECT,
  2477. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2478. },
  2479. {
  2480. "direct packet access: test11 (shift, good access)",
  2481. .insns = {
  2482. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2483. offsetof(struct __sk_buff, data)),
  2484. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2485. offsetof(struct __sk_buff, data_end)),
  2486. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2487. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
  2488. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
  2489. BPF_MOV64_IMM(BPF_REG_3, 144),
  2490. BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
  2491. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
  2492. BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
  2493. BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
  2494. BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
  2495. BPF_MOV64_IMM(BPF_REG_0, 1),
  2496. BPF_EXIT_INSN(),
  2497. BPF_MOV64_IMM(BPF_REG_0, 0),
  2498. BPF_EXIT_INSN(),
  2499. },
  2500. .result = ACCEPT,
  2501. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2502. },
  2503. {
  2504. "direct packet access: test12 (and, good access)",
  2505. .insns = {
  2506. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2507. offsetof(struct __sk_buff, data)),
  2508. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2509. offsetof(struct __sk_buff, data_end)),
  2510. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2511. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
  2512. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
  2513. BPF_MOV64_IMM(BPF_REG_3, 144),
  2514. BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
  2515. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
  2516. BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
  2517. BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
  2518. BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
  2519. BPF_MOV64_IMM(BPF_REG_0, 1),
  2520. BPF_EXIT_INSN(),
  2521. BPF_MOV64_IMM(BPF_REG_0, 0),
  2522. BPF_EXIT_INSN(),
  2523. },
  2524. .result = ACCEPT,
  2525. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2526. },
  2527. {
  2528. "direct packet access: test13 (branches, good access)",
  2529. .insns = {
  2530. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2531. offsetof(struct __sk_buff, data)),
  2532. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2533. offsetof(struct __sk_buff, data_end)),
  2534. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2535. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
  2536. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
  2537. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2538. offsetof(struct __sk_buff, mark)),
  2539. BPF_MOV64_IMM(BPF_REG_4, 1),
  2540. BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
  2541. BPF_MOV64_IMM(BPF_REG_3, 14),
  2542. BPF_JMP_IMM(BPF_JA, 0, 0, 1),
  2543. BPF_MOV64_IMM(BPF_REG_3, 24),
  2544. BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
  2545. BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
  2546. BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
  2547. BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
  2548. BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
  2549. BPF_MOV64_IMM(BPF_REG_0, 1),
  2550. BPF_EXIT_INSN(),
  2551. BPF_MOV64_IMM(BPF_REG_0, 0),
  2552. BPF_EXIT_INSN(),
  2553. },
  2554. .result = ACCEPT,
  2555. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2556. },
  2557. {
  2558. "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
  2559. .insns = {
  2560. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2561. offsetof(struct __sk_buff, data)),
  2562. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2563. offsetof(struct __sk_buff, data_end)),
  2564. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2565. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
  2566. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
  2567. BPF_MOV64_IMM(BPF_REG_5, 12),
  2568. BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
  2569. BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
  2570. BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
  2571. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
  2572. BPF_MOV64_IMM(BPF_REG_0, 1),
  2573. BPF_EXIT_INSN(),
  2574. BPF_MOV64_IMM(BPF_REG_0, 0),
  2575. BPF_EXIT_INSN(),
  2576. },
  2577. .result = ACCEPT,
  2578. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2579. },
  2580. {
  2581. "direct packet access: test15 (spill with xadd)",
  2582. .insns = {
  2583. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2584. offsetof(struct __sk_buff, data)),
  2585. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2586. offsetof(struct __sk_buff, data_end)),
  2587. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2588. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2589. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
  2590. BPF_MOV64_IMM(BPF_REG_5, 4096),
  2591. BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
  2592. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
  2593. BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
  2594. BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
  2595. BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
  2596. BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
  2597. BPF_MOV64_IMM(BPF_REG_0, 0),
  2598. BPF_EXIT_INSN(),
  2599. },
  2600. .errstr = "R2 invalid mem access 'inv'",
  2601. .result = REJECT,
  2602. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2603. },
  2604. {
  2605. "direct packet access: test16 (arith on data_end)",
  2606. .insns = {
  2607. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
  2608. offsetof(struct __sk_buff, data)),
  2609. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
  2610. offsetof(struct __sk_buff, data_end)),
  2611. BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
  2612. BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
  2613. BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
  2614. BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
  2615. BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
  2616. BPF_MOV64_IMM(BPF_REG_0, 0),
  2617. BPF_EXIT_INSN(),
  2618. },
  2619. .errstr = "invalid access to packet",
  2620. .result = REJECT,
  2621. .prog_type = BPF_PROG_TYPE_SCHED_CLS,
  2622. },
	{
		"direct packet access: test17 (pruning, alignment)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
			BPF_JMP_A(-6),
		},
		.errstr = "misaligned packet access off 2+15+-4 size 4",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
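	/* tests 18-24: "imm/x += pkt_ptr" - adding a packet pointer into a
	 * scalar destination must be treated like pkt_ptr += scalar, with
	 * the usual range checks still enforced; test23 is the variant this
	 * verifier rejects (see its errstr).
	 */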
	{
		"direct packet access: test18 (imm += pkt_ptr, 1)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_IMM(BPF_REG_0, 8),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"direct packet access: test19 (imm += pkt_ptr, 2)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
			BPF_MOV64_IMM(BPF_REG_4, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
			BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"direct packet access: test20 (x += pkt_ptr, 1)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
	},
	{
		"direct packet access: test21 (x += pkt_ptr, 2)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0xffff),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
	},
	{
		"direct packet access: test22 (x += pkt_ptr, 3)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 48),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_2, 1),
			BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
	},
	{
		"direct packet access: test23 (x += pkt_ptr, 4)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_0, 31),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.errstr = "cannot add integer value with 47 upper zero bits to ptr_to_packet",
	},
	{
		"direct packet access: test24 (x += pkt_ptr, 5)",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_0, 64),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
	},
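	/* helper access to packet: packet pointers may only be passed to
	 * helpers after a data/data_end range check that covers the whole
	 * access. XDP variants (xdp_md) come first, then the SCHED_CLS
	 * ones on __sk_buff.
	 */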
	{
		"helper access to packet: test1, valid packet_ptr range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_update_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 5 },
		.result_unpriv = ACCEPT,
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"helper access to packet: test2, unchecked packet_ptr",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 1 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"helper access to packet: test3, variable add",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 11 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"helper access to packet: test4, packet_ptr with bad range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 7 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"helper access to packet: test5, packet_ptr with too short range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 6 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
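	/* the same five checks again for SCHED_CLS programs on __sk_buff */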
	{
		"helper access to packet: test6, cls valid packet_ptr range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_update_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 5 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test7, cls unchecked packet_ptr",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 1 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test8, cls variable add",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 11 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test9, cls packet_ptr with bad range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 7 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test10, cls packet_ptr with too short range",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 6 },
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
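	/* tests 11-12: skb_store_bytes()/skb_load_bytes() work on a
	 * caller-supplied buffer, so a direct packet pointer is not an
	 * acceptable argument for them even after a range check.
	 */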
	{
		"helper access to packet: test11, cls unsuitable helper 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_4, 42),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_skb_store_bytes),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "helper access to the packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test12, cls unsuitable helper 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_4, 4),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_skb_load_bytes),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "helper access to the packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
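	/* tests 13-20: bpf_csum_diff() may read the packet directly, but
	 * its (ptr, size) pair must stay inside the checked range: the size
	 * must be a positive in-bounds constant, and the pointer argument
	 * must be an actual packet pointer, not pkt_end or a scalar.
	 */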
	{
		"helper access to packet: test13, cls helper ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test14, cls helper fail sub",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "type=inv expected=fp",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test15, cls helper fail range 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_2, 8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test16, cls helper fail range 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_2, -9),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test17, cls helper fail range 3",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_2, ~0),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test18, cls helper fail range zero",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test19, pkt end as input",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "R1 type=pkt_end expected=fp",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"helper access to packet: test20, wrong reg",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
			BPF_MOV64_IMM(BPF_REG_2, 4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_csum_diff),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid access to packet",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
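	/* map value access: a successful lookup yields a pointer to the
	 * value (struct test_val, value_size=48); any offset added to it
	 * must be provably inside that window, which for variable indices
	 * means both a floor and a ceiling check before the access.
	 */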
	{
		"valid map access into an array with a constant",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 leaks addr",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"valid map access into an array with a register",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_IMM(BPF_REG_1, 4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"valid map access into an array with a variable",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"valid map access into an array with a signed variable",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.result_unpriv = REJECT,
		.result = ACCEPT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"invalid map access into an array with a constant",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
		.result = REJECT,
	},
	{
		"invalid map access into an array with a register",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is outside of the array range",
		.result_unpriv = REJECT,
		.result = REJECT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"invalid map access into an array with a variable",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
		.result_unpriv = REJECT,
		.result = REJECT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"invalid map access into an array with no floor check",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
		.result_unpriv = REJECT,
		.result = REJECT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"invalid map access into an array with an invalid max check",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
		.result_unpriv = REJECT,
		.result = REJECT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"invalid map access into an array with an invalid max check",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3, 11 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
		.result_unpriv = REJECT,
		.result = REJECT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
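	/* the lookup result is PTR_TO_MAP_VALUE_OR_NULL until NULL-checked:
	 * copies made from it share the one check, ALU ops on the
	 * maybe-NULL pointer destroy the type, and a check on a second
	 * lookup's result does not validate a copy of the first.
	 */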
	{
		"multiple registers share map_lookup_elem result",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
	{
		"alu ops on ptr_to_map_value_or_null, 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr = "R4 invalid mem access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
	{
		"alu ops on ptr_to_map_value_or_null, 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr = "R4 invalid mem access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
	{
		"alu ops on ptr_to_map_value_or_null, 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr = "R4 invalid mem access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
	{
		"invalid memory access with multiple map_lookup_elem calls",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.result = REJECT,
		.errstr = "R4 !read_ok",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
	{
		"valid indirect map_lookup_elem access with 2nd lookup in branch",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_IMM(BPF_REG_2, 10),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
	{
		"multiple registers share map_lookup_elem bad reg type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
			BPF_MOV64_IMM(BPF_REG_1, 3),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.result = REJECT,
		.errstr = "R3 invalid mem access 'inv'",
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
	{
		"invalid map access from else condition",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
		.result = REJECT,
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.result_unpriv = REJECT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
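	/* constant tracking through BPF_OR: 34 | 13 == 47 still fits the
	 * 48-byte stack buffer handed to bpf_probe_read(), while
	 * 34 | 24 == 58 overruns it and must be caught by the stack
	 * bounds check on the size argument.
	 */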
	{
		"constant register |= constant should keep constant type",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
			BPF_MOV64_IMM(BPF_REG_2, 34),
			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		"constant register |= constant should not bypass stack boundary checks",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
			BPF_MOV64_IMM(BPF_REG_2, 34),
			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack type R1 off=-48 access_size=58",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		"constant register |= constant register should keep constant type",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
			BPF_MOV64_IMM(BPF_REG_2, 34),
			BPF_MOV64_IMM(BPF_REG_4, 13),
			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		"constant register |= constant register should not bypass stack boundary checks",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
			BPF_MOV64_IMM(BPF_REG_2, 34),
			BPF_MOV64_IMM(BPF_REG_4, 24),
			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack type R1 off=-48 access_size=58",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
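	/* LWT programs: direct packet reads are allowed for all three
	 * types, writes only for LWT_XMIT, and skb->tc_classid is not
	 * part of their context.
	 */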
	{
		"invalid direct packet write for LWT_IN",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "cannot write into packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_LWT_IN,
	},
	{
		"invalid direct packet write for LWT_OUT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "cannot write into packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_LWT_OUT,
	},
	{
		"direct packet write for LWT_XMIT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
	},
	{
		"direct packet read for LWT_IN",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_LWT_IN,
	},
	{
		"direct packet read for LWT_OUT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_LWT_OUT,
	},
	{
		"direct packet read for LWT_XMIT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
	},
	{
		"overlapping checks for direct packet access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
	},
	{
		"invalid access of tc_classid for LWT_IN",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_classid)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid bpf_context access",
	},
	{
		"invalid access of tc_classid for LWT_OUT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_classid)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid bpf_context access",
	},
	{
		"invalid access of tc_classid for LWT_XMIT",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_classid)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "invalid bpf_context access",
	},
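	/* pointer leaks: storing map, stack or other kernel pointers into
	 * ctx or map memory is allowed for privileged loads but must be
	 * rejected for unprivileged ones.
	 */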
	{
		"leak pointer into ctx 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LD_MAP_FD(BPF_REG_2, 0),
			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 2 },
		.errstr_unpriv = "R2 leaks addr into mem",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"leak pointer into ctx 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R10 leaks addr into mem",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"leak pointer into ctx 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LD_MAP_FD(BPF_REG_2, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 1 },
		.errstr_unpriv = "R2 leaks addr into ctx",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"leak pointer into map val",
		.insns = {
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr_unpriv = "R6 leaks addr into mem",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
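	/* helper access to map values: bpf_probe_read()'s (buf, size) pair
	 * is validated against the map's value_size=48; empty, oversized
	 * and negative sizes are all rejected.
	 */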
	{
		"helper access to map: full range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		"helper access to map: partial range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_2, 8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		"helper access to map: empty range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		"helper access to map: out-of-bound range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		"helper access to map: negative range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_2, -8),
			BPF_MOV64_IMM(BPF_REG_3, 0),
			BPF_EMIT_CALL(BPF_FUNC_probe_read),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr = "invalid access to map value, value_size=48 off=0 size=-8",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
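	/* same range checks after the value pointer has been advanced by a
	 * constant (immediate or register): the usable window shrinks by
	 * offsetof(struct test_val, foo).
	 */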
{
	"helper access to adjusted map (via const imm): full range",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
			      offsetof(struct test_val, foo)),
		BPF_MOV64_IMM(BPF_REG_2,
			      sizeof(struct test_val) -
			      offsetof(struct test_val, foo)),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const imm): partial range",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
			      offsetof(struct test_val, foo)),
		BPF_MOV64_IMM(BPF_REG_2, 8),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const imm): empty range",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
			      offsetof(struct test_val, foo)),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "R1 min value is outside of the array range",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const imm): out-of-bound range",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
			      offsetof(struct test_val, foo)),
		BPF_MOV64_IMM(BPF_REG_2,
			      sizeof(struct test_val) -
			      offsetof(struct test_val, foo) + 8),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=4 size=52",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const imm): negative range (> adjustment)",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
			      offsetof(struct test_val, foo)),
		BPF_MOV64_IMM(BPF_REG_2, -8),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=4 size=-8",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const imm): negative range (< adjustment)",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
			      offsetof(struct test_val, foo)),
		BPF_MOV64_IMM(BPF_REG_2, -1),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "R1 min value is outside of the array range",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
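/*
 * Same adjustment as above, but the constant offset is first loaded
 * into a register and added with BPF_ALU64_REG, so the verifier has to
 * carry the known constant through the register copy.
 */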
{
	"helper access to adjusted map (via const reg): full range",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_IMM(BPF_REG_3,
			      offsetof(struct test_val, foo)),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
		BPF_MOV64_IMM(BPF_REG_2,
			      sizeof(struct test_val) -
			      offsetof(struct test_val, foo)),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const reg): partial range",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_IMM(BPF_REG_3,
			      offsetof(struct test_val, foo)),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
		BPF_MOV64_IMM(BPF_REG_2, 8),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const reg): empty range",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "R1 min value is outside of the array range",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const reg): out-of-bound range",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_IMM(BPF_REG_3,
			      offsetof(struct test_val, foo)),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
		BPF_MOV64_IMM(BPF_REG_2,
			      sizeof(struct test_val) -
			      offsetof(struct test_val, foo) + 8),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=4 size=52",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const reg): negative range (> adjustment)",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_IMM(BPF_REG_3,
			      offsetof(struct test_val, foo)),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
		BPF_MOV64_IMM(BPF_REG_2, -8),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=4 size=-8",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via const reg): negative range (< adjustment)",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_IMM(BPF_REG_3,
			      offsetof(struct test_val, foo)),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
		BPF_MOV64_IMM(BPF_REG_2, -1),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "R1 min value is outside of the array range",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
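/*
 * Here the offset is loaded from the map value itself, so it is only
 * known as a range: the access is accepted when a conditional jump
 * bounds it, and rejected when the bound is missing or too large.
 */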
{
	"helper access to adjusted map (via variable): full range",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
			    offsetof(struct test_val, foo), 4),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
		BPF_MOV64_IMM(BPF_REG_2,
			      sizeof(struct test_val) -
			      offsetof(struct test_val, foo)),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via variable): partial range",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
			    offsetof(struct test_val, foo), 4),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
		BPF_MOV64_IMM(BPF_REG_2, 8),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via variable): empty range",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
			    offsetof(struct test_val, foo), 4),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "R1 min value is outside of the array range",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via variable): no max check",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to adjusted map (via variable): wrong max check",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
			    offsetof(struct test_val, foo), 4),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
		BPF_MOV64_IMM(BPF_REG_2,
			      sizeof(struct test_val) -
			      offsetof(struct test_val, foo) + 1),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=4 size=45",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
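/*
 * Map element value tests: pointers spilled to the stack must keep
 * their (possibly or-null) type when filled back, registers clobbered
 * by a helper call must not be read back, misaligned value accesses
 * need F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, and ALU ops the verifier
 * cannot track (AND, 32-bit add, DIV, byte swap, XADD on the spilled
 * slot) turn the pointer into an unknown value that faults on use.
 */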
{
	"map element value is preserved across register spilling",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
},
{
	"map element value or null is marked on register spilling",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
},
{
	"map element value store of cleared call register",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R1 !read_ok",
	.errstr = "R1 !read_ok",
	.result = REJECT,
	.result_unpriv = REJECT,
},
{
	"map element value with unaligned store",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
		BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
		BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
		BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
		BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
		BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
		BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
		BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
		BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
		BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
		BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
		BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 pointer arithmetic prohibited",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"map element value with unaligned load",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
		BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 pointer arithmetic prohibited",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"map element value illegal alu op, 1",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 pointer arithmetic prohibited",
	.errstr = "invalid mem access 'inv'",
	.result = REJECT,
	.result_unpriv = REJECT,
},
{
	"map element value illegal alu op, 2",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 pointer arithmetic prohibited",
	.errstr = "invalid mem access 'inv'",
	.result = REJECT,
	.result_unpriv = REJECT,
},
{
	"map element value illegal alu op, 3",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 pointer arithmetic prohibited",
	.errstr = "invalid mem access 'inv'",
	.result = REJECT,
	.result_unpriv = REJECT,
},
{
	"map element value illegal alu op, 4",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 pointer arithmetic prohibited",
	.errstr = "invalid mem access 'inv'",
	.result = REJECT,
	.result_unpriv = REJECT,
},
{
	"map element value illegal alu op, 5",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
		BPF_MOV64_IMM(BPF_REG_3, 4096),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
		BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 invalid mem access 'inv'",
	.errstr = "R0 invalid mem access 'inv'",
	.result = REJECT,
	.result_unpriv = REJECT,
},
{
	"map element value is preserved across register spilling",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
			      offsetof(struct test_val, foo)),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 pointer arithmetic prohibited",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
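/*
 * Variable-length helper memory: the size handed to probe_read() or
 * csum_diff() must be provably bounded, either by masking with a
 * bitwise AND or by (signed or unsigned) conditional jumps; a possible
 * zero length on a non-NULL pointer, a missing bound, or a bound past
 * the buffer is rejected.
 */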
{
	"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		BPF_MOV64_IMM(BPF_REG_2, 16),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, bitwise AND, zero included",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_2, 16),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_EXIT_INSN(),
	},
	.errstr = "invalid stack type R1 off=-64 access_size=0",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_2, 16),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.errstr = "invalid stack type R1 off=-64 access_size=65",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP, correct bounds",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		BPF_MOV64_IMM(BPF_REG_2, 16),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP (signed), correct bounds",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		BPF_MOV64_IMM(BPF_REG_2, 16),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP, bounds + offset",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_2, 16),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.errstr = "invalid stack type R1 off=-64 access_size=65",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP, wrong max",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_2, 16),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.errstr = "invalid stack type R1 off=-64 access_size=65",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP, no max check",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_2, 16),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.errstr = "R2 unbounded memory access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP, no min check",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_2, 16),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
		BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.errstr = "invalid stack type R1 off=-64 access_size=0",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: stack, JMP (signed), no min check",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_2, 16),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.errstr = "R2 min value is negative",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: map, JMP, correct bounds",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
			    sizeof(struct test_val), 4),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: map, JMP, wrong max",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
			    sizeof(struct test_val) + 1, 4),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "invalid access to map value, value_size=48 off=0 size=49",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: map adjusted, JMP, correct bounds",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
		BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
			    sizeof(struct test_val) - 20, 4),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: map adjusted, JMP, wrong max",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
		BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
		BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
			    sizeof(struct test_val) - 19, 4),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr = "R1 min value is outside of the array range",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: size > 0 not allowed on NULL",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_EMIT_CALL(BPF_FUNC_csum_diff),
		BPF_EXIT_INSN(),
	},
	.errstr = "R1 type=imm expected=fp",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to variable memory: size = 0 not allowed on != NULL",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_EMIT_CALL(BPF_FUNC_csum_diff),
		BPF_EXIT_INSN(),
	},
	.errstr = "invalid stack type R1 off=-8 access_size=0",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"helper access to variable memory: 8 bytes leak",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
		BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_EXIT_INSN(),
	},
	.errstr = "invalid indirect read from stack off -64+32 size 64",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"helper access to variable memory: 8 bytes no leak (init memory)",
	.insns = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_probe_read),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
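/*
 * Pointer arithmetic with values the verifier can only prove to be
 * possibly negative (AND of a negative constant, a value derived via
 * MOD/AND/shift tricks) must be rejected before the store.
 */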
{
	"invalid and of negative number",
	.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_MOV64_IMM(BPF_REG_1, 6),
		BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
			   offsetof(struct test_val, foo)),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 pointer arithmetic prohibited",
	.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
	.result = REJECT,
	.result_unpriv = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"invalid range check",
	.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
		BPF_MOV64_IMM(BPF_REG_9, 1),
		BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
		BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
		BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
		BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
		BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
		BPF_MOV32_IMM(BPF_REG_3, 1),
		BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
		BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
		BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map2 = { 3 },
	.errstr_unpriv = "R0 pointer arithmetic prohibited",
	.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
	.result = REJECT,
	.result_unpriv = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
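/*
 * Map-in-map: the inner map pointer returned by the first lookup may
 * be passed straight to a second lookup, but only after a NULL check
 * and without any arithmetic on it.
 */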
{
	"map in map access",
	.insns = {
		BPF_ST_MEM(0, BPF_REG_10, -4, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
		BPF_ST_MEM(0, BPF_REG_10, -4, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_in_map = { 3 },
	.result = ACCEPT,
},
{
	"invalid inner map pointer",
	.insns = {
		BPF_ST_MEM(0, BPF_REG_10, -4, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
		BPF_ST_MEM(0, BPF_REG_10, -4, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_in_map = { 3 },
	.errstr = "R1 type=inv expected=map_ptr",
	.errstr_unpriv = "R1 pointer arithmetic prohibited",
	.result = REJECT,
},
{
	"forgot null checking on the inner map pointer",
	.insns = {
		BPF_ST_MEM(0, BPF_REG_10, -4, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_ST_MEM(0, BPF_REG_10, -4, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_in_map = { 3 },
	.errstr = "R1 type=map_value_or_null expected=map_ptr",
	.result = REJECT,
},
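/*
 * BPF_LD_ABS/BPF_LD_IND clobber the caller-saved registers R1-R5;
 * reading any of them after the load must fail, while callee-saved
 * registers such as R7 survive.
 */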
{
	"ld_abs: check calling conv, r1",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_1, 0),
		BPF_LD_ABS(BPF_W, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
		BPF_EXIT_INSN(),
	},
	.errstr = "R1 !read_ok",
	.result = REJECT,
},
{
	"ld_abs: check calling conv, r2",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_LD_ABS(BPF_W, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_EXIT_INSN(),
	},
	.errstr = "R2 !read_ok",
	.result = REJECT,
},
{
	"ld_abs: check calling conv, r3",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_LD_ABS(BPF_W, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
		BPF_EXIT_INSN(),
	},
	.errstr = "R3 !read_ok",
	.result = REJECT,
},
{
	"ld_abs: check calling conv, r4",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_LD_ABS(BPF_W, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
		BPF_EXIT_INSN(),
	},
	.errstr = "R4 !read_ok",
	.result = REJECT,
},
{
	"ld_abs: check calling conv, r5",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_5, 0),
		BPF_LD_ABS(BPF_W, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
		BPF_EXIT_INSN(),
	},
	.errstr = "R5 !read_ok",
	.result = REJECT,
},
{
	"ld_abs: check calling conv, r7",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_7, 0),
		BPF_LD_ABS(BPF_W, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"ld_ind: check calling conv, r1",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_1, 1),
		BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
		BPF_EXIT_INSN(),
	},
	.errstr = "R1 !read_ok",
	.result = REJECT,
},
{
	"ld_ind: check calling conv, r2",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_2, 1),
		BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
		BPF_EXIT_INSN(),
	},
	.errstr = "R2 !read_ok",
	.result = REJECT,
},
{
	"ld_ind: check calling conv, r3",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_3, 1),
		BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
		BPF_EXIT_INSN(),
	},
	.errstr = "R3 !read_ok",
	.result = REJECT,
},
{
	"ld_ind: check calling conv, r4",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_4, 1),
		BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
		BPF_EXIT_INSN(),
	},
	.errstr = "R4 !read_ok",
	.result = REJECT,
},
{
	"ld_ind: check calling conv, r5",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_5, 1),
		BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
		BPF_EXIT_INSN(),
	},
	.errstr = "R5 !read_ok",
	.result = REJECT,
},
{
	"ld_ind: check calling conv, r7",
	.insns = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_MOV64_IMM(BPF_REG_7, 1),
		BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
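/*
 * Context access in perf event programs: sample_period may be loaded
 * at any width; on big-endian the offset is shifted so the same bytes
 * are read.
 */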
{
	"check bpf_perf_event_data->sample_period byte load permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct bpf_perf_event_data, sample_period)),
#else
		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
			    offsetof(struct bpf_perf_event_data, sample_period) + 7),
#endif
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"check bpf_perf_event_data->sample_period half load permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct bpf_perf_event_data, sample_period)),
#else
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct bpf_perf_event_data, sample_period) + 6),
#endif
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"check bpf_perf_event_data->sample_period word load permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct bpf_perf_event_data, sample_period)),
#else
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct bpf_perf_event_data, sample_period) + 4),
#endif
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"check bpf_perf_event_data->sample_period dword load permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
			    offsetof(struct bpf_perf_event_data, sample_period)),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
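/*
 * Half-width loads of these __sk_buff fields are invalid context
 * accesses for the given program types.
 */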
{
	"check skb->data half load not permitted",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, data)),
#else
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, data) + 2),
#endif
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid bpf_context access",
},
{
	"check skb->tc_classid half load not permitted for lwt prog",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
#ifdef __LITTLE_ENDIAN
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, tc_classid)),
#else
		BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, tc_classid) + 2),
#endif
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid bpf_context access",
	.prog_type = BPF_PROG_TYPE_LWT_IN,
},
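/*
 * Mixing signed and unsigned bounds checks: none of the comparison
 * sequences below actually pins the loaded value to a safe range, so
 * using it as a map value offset (or a skb_load_bytes length) must be
 * rejected.
 */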
  5500. {
  5501. "bounds checks mixing signed and unsigned, positive bounds",
  5502. .insns = {
  5503. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  5504. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5505. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5506. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5507. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5508. BPF_FUNC_map_lookup_elem),
  5509. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  5510. BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
  5511. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
  5512. BPF_MOV64_IMM(BPF_REG_2, 2),
  5513. BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
  5514. BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
  5515. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  5516. BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
  5517. BPF_MOV64_IMM(BPF_REG_0, 0),
  5518. BPF_EXIT_INSN(),
  5519. },
  5520. .fixup_map1 = { 3 },
  5521. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  5522. .errstr = "R0 min value is negative",
  5523. .result = REJECT,
  5524. .result_unpriv = REJECT,
  5525. },
  5526. {
  5527. "bounds checks mixing signed and unsigned",
  5528. .insns = {
  5529. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  5530. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5531. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5532. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5533. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5534. BPF_FUNC_map_lookup_elem),
  5535. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  5536. BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
  5537. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
  5538. BPF_MOV64_IMM(BPF_REG_2, -1),
  5539. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
  5540. BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
  5541. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  5542. BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
  5543. BPF_MOV64_IMM(BPF_REG_0, 0),
  5544. BPF_EXIT_INSN(),
  5545. },
  5546. .fixup_map1 = { 3 },
  5547. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  5548. .errstr = "R0 min value is negative",
  5549. .result = REJECT,
  5550. .result_unpriv = REJECT,
  5551. },
  5552. {
  5553. "bounds checks mixing signed and unsigned, variant 2",
  5554. .insns = {
  5555. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  5556. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5557. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5558. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5559. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5560. BPF_FUNC_map_lookup_elem),
  5561. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
  5562. BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
  5563. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
  5564. BPF_MOV64_IMM(BPF_REG_2, -1),
  5565. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
  5566. BPF_MOV64_IMM(BPF_REG_8, 0),
  5567. BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
  5568. BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
  5569. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
  5570. BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
  5571. BPF_MOV64_IMM(BPF_REG_0, 0),
  5572. BPF_EXIT_INSN(),
  5573. },
  5574. .fixup_map1 = { 3 },
  5575. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  5576. .errstr = "R8 invalid mem access 'inv'",
  5577. .result = REJECT,
  5578. .result_unpriv = REJECT,
  5579. },
  5580. {
  5581. "bounds checks mixing signed and unsigned, variant 3",
  5582. .insns = {
  5583. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  5584. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5585. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5586. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5587. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5588. BPF_FUNC_map_lookup_elem),
  5589. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
  5590. BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
  5591. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
  5592. BPF_MOV64_IMM(BPF_REG_2, -1),
  5593. BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
  5594. BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
  5595. BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
  5596. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
  5597. BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
  5598. BPF_MOV64_IMM(BPF_REG_0, 0),
  5599. BPF_EXIT_INSN(),
  5600. },
  5601. .fixup_map1 = { 3 },
  5602. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  5603. .errstr = "R8 invalid mem access 'inv'",
  5604. .result = REJECT,
  5605. .result_unpriv = REJECT,
  5606. },
  5607. {
  5608. "bounds checks mixing signed and unsigned, variant 4",
  5609. .insns = {
  5610. BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
  5611. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
  5612. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
  5613. BPF_LD_MAP_FD(BPF_REG_1, 0),
  5614. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  5615. BPF_FUNC_map_lookup_elem),
  5616. BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
  5617. BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
  5618. BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
  5619. BPF_MOV64_IMM(BPF_REG_2, 1),
  5620. BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
  5621. BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
  5622. BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
  5623. BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
  5624. BPF_MOV64_IMM(BPF_REG_0, 0),
  5625. BPF_EXIT_INSN(),
  5626. },
  5627. .fixup_map1 = { 3 },
  5628. .errstr_unpriv = "R0 pointer arithmetic prohibited",
  5629. .errstr = "R0 min value is negative",
  5630. .result = REJECT,
  5631. .result_unpriv = REJECT,
  5632. },
  5633. {
  5634. "bounds checks mixing signed and unsigned, variant 5",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 invalid mem access",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 6",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_6, -1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_5, 0),
			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_load_bytes),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R4 min value is negative, either use unsigned",
		.errstr = "R4 min value is negative, either use unsigned",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 7",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 8",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024 + 1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 9",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 10",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 11",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 12",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
			/* Dead branch. */
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 13",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -6),
			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 14",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, 2),
			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_7, 1),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 15",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -1),
			BPF_MOV64_IMM(BPF_REG_8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
		},
		.fixup_map1 = { 4 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"bounds checks mixing signed and unsigned, variant 16",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
			BPF_MOV64_IMM(BPF_REG_2, -6),
			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
	{
		"subtraction bounds (map value)",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr_unpriv = "R0 pointer arithmetic prohibited",
		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
		.result = REJECT,
		.result_unpriv = REJECT,
	},
};
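
/* The insns arrays are fixed size; scan backwards for the last
 * non-zero instruction to recover the actual program length.
 */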
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static int create_map(uint32_t size_value, uint32_t max_elem)
{
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
			    size_value, max_elem, BPF_F_NO_PREALLOC);
	if (fd < 0)
		printf("Failed to create hash map '%s'!\n", strerror(errno));

	return fd;
}

static int create_prog_array(void)
{
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
			    sizeof(int), 4, 0);
	if (fd < 0)
		printf("Failed to create prog array '%s'!\n", strerror(errno));

	return fd;
}

static int create_map_in_map(void)
{
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1, 0);
	if (inner_map_fd < 0) {
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
					     sizeof(int), inner_map_fd, 1, 0);
	if (outer_map_fd < 0)
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));

	close(inner_map_fd);

	return outer_map_fd;
}

static char bpf_vlog[32768];
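
/* Patch the dummy fd 0 in each BPF_LD_MAP_FD() instruction with the
 * fd of a freshly created map, at the instruction offsets listed in
 * the test's fixup arrays.
 */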
static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
			  int *map_fds)
{
	int *fixup_map1 = test->fixup_map1;
	int *fixup_map2 = test->fixup_map2;
	int *fixup_prog = test->fixup_prog;
	int *fixup_map_in_map = test->fixup_map_in_map;

	/* Allocating HTs with 1 elem is fine here, since we only exercise
	 * the verifier and never do a runtime lookup, so the only thing
	 * that really matters is the value size.
	 */
	if (*fixup_map1) {
		map_fds[0] = create_map(sizeof(long long), 1);
		do {
			prog[*fixup_map1].imm = map_fds[0];
			fixup_map1++;
		} while (*fixup_map1);
	}

	if (*fixup_map2) {
		map_fds[1] = create_map(sizeof(struct test_val), 1);
		do {
			prog[*fixup_map2].imm = map_fds[1];
			fixup_map2++;
		} while (*fixup_map2);
	}

	if (*fixup_prog) {
		map_fds[2] = create_prog_array();
		do {
			prog[*fixup_prog].imm = map_fds[2];
			fixup_prog++;
		} while (*fixup_prog);
	}

	if (*fixup_map_in_map) {
		map_fds[3] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[3];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}
}
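
/* Load one test program and check the verifier verdict: either the
 * expected acceptance, or a rejection whose log contains the expected
 * error substring.
 */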
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, reject_from_alignment;
	struct bpf_insn *prog = test->insns;
	int prog_len = probe_filter_length(prog);
	int prog_type = test->prog_type;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int i;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	do_test_fixup(test, prog, map_fds);

	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len,
				     test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	reject_from_alignment = fd_prog < 0 &&
				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
				strstr(bpf_vlog, "Unknown alignment.");
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (reject_from_alignment) {
		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
		       strerror(errno));
		goto fail_log;
	}
#endif
	if (expected_ret == ACCEPT) {
		if (fd_prog < 0 && !reject_from_alignment) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
			printf("FAIL\nUnexpected error message!\n");
			goto fail_log;
		}
	}

	(*passes)++;
	printf("OK%s\n", reject_from_alignment ?
	       " (NOTE: reject due to unknown alignment)" : "");

close_fds:
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}
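
/* Report whether CAP_SYS_ADMIN is in our effective set. The
 * CAP_IS_SUPPORTED(CAP_SETFCAP) probe only verifies that the running
 * kernel supports capability queries at all before we ask.
 */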
static bool is_admin(void)
{
	cap_t caps;
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("cap_get_flag");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	if (cap_free(caps))
		perror("cap_free");
	return (sysadmin == CAP_SET);
}
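
/* Raise or drop CAP_SYS_ADMIN in the effective set, letting a root
 * invocation also exercise the unprivileged verifier paths.
 */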
static int set_admin(bool admin)
{
	cap_t caps;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
			 admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag");
		goto out;
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Program types other than the default are not supported
		 * by non-root, so the unprivileged run is done only for
		 * default-type tests, dropping CAP_SYS_ADMIN if needed.
		 */
		if (!test->prog_type) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (!unpriv) {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}
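
/* With two arguments, run the inclusive test range [from, to]; with
 * one, run that single test. RLIMIT_MEMLOCK is raised beforehand so
 * map allocations can be charged (to 1 MB when unprivileged).
 */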
int main(int argc, char **argv)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
	struct rlimit rlim = { 1 << 20, 1 << 20 };
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to   = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to   = t + 1;
		}
	}

	setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
	return do_test(unpriv, from, to);
}