/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64
/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET 5
#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
#define MVPP2_PRS_PORT_LU_MAX 0xf
#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG 0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG 0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
#define MVPP2_CLS_PORT_WAY_REG 0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG 0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
#define MVPP2_RXQ_NUM_NEW_OFFSET 16
#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
#define MVPP2_RXQ_THRESH_REG 0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET 0
#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
#define MVPP2_RXQ_INDEX_REG 0x2050
#define MVPP2_TXQ_NUM_REG 0x2080
#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
#define MVPP2_TXQ_THRESH_REG 0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
#define MVPP2_TXQ_PENDING_REG 0x20a0
#define MVPP2_TXQ_PENDING_MASK 0x3fff
#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
#define MVPP2_BM_STOP_MASK BIT(1)
#define MVPP2_BM_STATE_MASK BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS 8
#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
					MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS 16
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
					 MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
#define MVPP2_BM_MC_RLS_REG 0x64c4
#define MVPP2_BM_MC_ID_MASK 0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
/* TX general registers */
#define MVPP2_TX_SNOOP_REG 0x8800
#define MVPP2_TX_PORT_FLUSH_REG 0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE 0x24
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \
				       0x400 + (port) * 0x400)
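/* Sanity check of the computation above (derived here, not from the
 * original source): port 0 -> 0x1000, port 1 -> 0x1400, port 2 -> 0x1c00,
 * port 3 -> 0x2000; the (port) >> 1 term inserts an extra 0x400 hole
 * between ports 1 and 2.
 */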
#define MVPP2_MIB_LATE_COLLISION 0x7c
#define MVPP2_ISR_SUM_MASK_REG 0x220c
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
#define MVPP2_GMAC_CTRL_1_REG 0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT 6
#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS 7
#define MVPP2_GMAC_CTRL_2_REG 0x8
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
					   MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
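/* Worked example (derived here, not from the original source): with a
 * 256-entry aggregated ring, q->last_desc is 255, so
 * MVPP2_QUEUE_NEXT_DESC(q, 254) == 255 and
 * MVPP2_QUEUE_NEXT_DESC(q, 255) == 0, wrapping the index back to the
 * start of the ring.
 */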
/* Various constants */
/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100
/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled with zeroes automatically on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips them on its own.
 */
#define MVPP2_MH_SIZE 2
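/* Illustration (derived here, not from the original source): ETH_HLEN is
 * 14, so with the 2-byte MH in front the L2 header occupies offsets
 * 2..15 and an untagged IP header starts at offset 2 + 14 = 16, which is
 * 4-byte aligned; a single VLAN tag moves it to offset 20, still aligned.
 */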
#define MVPP2_ETH_TYPE_LEN 2
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa
#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
#define MVPP2_TX_CSUM_MAX_SIZE 9800
/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
#define MVPP2_TX_MTU_MAX 0x7ffff
/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT 16
/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4
/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8
/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ 8
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4
/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 128
/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD 1024
/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64
/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE 256
/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32
/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
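/* Worked example (derived from the macros above, not from the original
 * source), for an MTU of 1500 and the 32-byte cache line:
 *   MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32)
 *                           = ALIGN(1524, 32) = 1536
 *   MVPP2_RX_BUF_SIZE(1536) = 1536 + NET_SKB_PAD
 * MVPP2_RX_TOTAL_SIZE() then adds the skb_shared_info footprint, and
 * MVPP2_RX_MAX_PKT_SIZE() is the exact inverse of that composition.
 */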
#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16
/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)
/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
#define MVPP2_PRS_SRAM_WORDS 4
#define MVPP2_PRS_FLOW_ID_SIZE 64
#define MVPP2_PRS_FLOW_ID_MASK 0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
#define MVPP2_PRS_IPV4_HEAD 0x40
#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
#define MVPP2_PRS_IPV4_IHL 0x5
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
#define MVPP2_PRS_IPV6_HOP_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
#define MVPP2_PRS_DBL_VLANS_MAX 100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS 8
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
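/* Byte-swizzling illustration (derived here, not from the original
 * source): the two macros above map a header-data byte offset to its
 * position in the TCAM entry, placing each pair of data bytes in the low
 * half-word of a 32-bit TCAM word and their enable (mask) bytes in the
 * high half-word:
 *   offs:                        0  1  2  3  4  5  6  7
 *   MVPP2_PRS_TCAM_DATA_BYTE:    0  1  4  5  8  9 12 13
 *   MVPP2_PRS_TCAM_DATA_BYTE_EN: 2  3  6  7 10 11 14 15
 */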
#define MVPP2_PRS_TCAM_AI_BYTE 16
#define MVPP2_PRS_TCAM_PORT_BYTE 17
#define MVPP2_PRS_TCAM_LU_BYTE 20
#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD 5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL 0
#define MVPP2_PE_FIRST_FREE_TID 1
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS 0
#define MVPP2_PRS_SRAM_RI_WORD 0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
#define MVPP2_PRS_SRAM_AI_OFFS 90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
#define MVPP2_PRS_SRAM_AI_MASK 0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
#define MVPP2_PRS_RI_VLAN_MASK 0xc
#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
#define MVPP2_PRS_RI_L4_TCP BIT(22)
#define MVPP2_PRS_RI_L4_UDP BIT(23)
#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI 0
#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED true
#define MVPP2_PRS_UNTAGGED false
#define MVPP2_PRS_EDSA true
#define MVPP2_PRS_DSA false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};
/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64
/* BM constants */
#define MVPP2_BM_POOLS_NUM 8
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL 3
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24
/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
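/* Sanity check (derived from the RX buffer macros above, not from the
 * original source): plugging MVPP2_BM_SHORT_PKT_SIZE back in gives
 *   MVPP2_RX_TOTAL_SIZE(MVPP2_RX_BUF_SIZE(MVPP2_BM_SHORT_PKT_SIZE))
 *     = (512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
 *       + NET_SKB_PAD + MVPP2_SKB_SHINFO_SIZE = 512,
 * i.e. each short-pool buffer really occupies 512 bytes in total.
 */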
enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};
/* Definitions */
/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;
	/* List of pointers to port structures */
	struct mvpp2_port **port_list;
	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;
	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;
	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;
	/* Tclk value */
	u32 tclk;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};
/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_port {
	u8 id;
	int irq;
	struct mvpp2 *priv;
	/* Per-port registers' base address */
	void __iomem *base;
	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;
	int pkt_size;
	u32 pending_cause_rx;
	struct napi_struct napi;
	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;
	/* Flags */
	unsigned long flags;
	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;
	/* Index of first port's physical RXQ */
	u8 first_rxq;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the layout of
 * the transmit and receive DMA descriptors; that layout is therefore
 * defined by the hardware design.
 */
#define MVPP2_TXD_L3_OFF_SHIFT 0
#define MVPP2_TXD_IP_HLEN_SHIFT 8
#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
#define MVPP2_TXD_PADDING_DISABLE BIT(23)
#define MVPP2_TXD_L4_UDP BIT(24)
#define MVPP2_TXD_L3_IP6 BIT(26)
#define MVPP2_TXD_L_DESC BIT(28)
#define MVPP2_TXD_F_DESC BIT(29)
#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC BIT(21)
#define MVPP2_RXD_L4_CSUM_OK BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
#define MVPP2_RXD_L4_TCP BIT(25)
#define MVPP2_RXD_L4_UDP BIT(26)
#define MVPP2_RXD_L3_IP4 BIT(28)
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)
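/* Illustrative sketch, not part of the original driver: multi-bit RXD
 * fields such as the BM pool id are extracted from the status word with
 * the usual mask-and-shift idiom. A hypothetical helper:
 */
static inline u32 mvpp2_rxd_bm_pool_id_example(u32 status)
{
	/* (BIT(16) | BIT(17) | BIT(18)) selects the field, shifted down by 16 */
	return (status & MVPP2_RXD_BM_POOL_ID_MASK) >> MVPP2_RXD_BM_POOL_ID_OFFS;
}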
  666. struct mvpp2_tx_desc {
  667. u32 command; /* Options used by HW for packet transmitting.*/
  668. u8 packet_offset; /* the offset from the buffer beginning */
  669. u8 phys_txq; /* destination queue ID */
  670. u16 data_size; /* data size of transmitted packet in bytes */
  671. u32 buf_phys_addr; /* physical addr of transmitted buffer */
  672. u32 buf_cookie; /* cookie for access to TX buffer in tx path */
  673. u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
  674. u32 reserved2; /* reserved (for future use) */
  675. };
  676. struct mvpp2_rx_desc {
  677. u32 status; /* info about received packet */
  678. u16 reserved1; /* parser_info (for future use, PnC) */
  679. u16 data_size; /* size of received packet in bytes */
  680. u32 buf_phys_addr; /* physical address of the buffer */
  681. u32 buf_cookie; /* cookie for access to RX buffer in rx path */
  682. u16 reserved2; /* gem_port_id (for future use, PON) */
  683. u16 reserved3; /* csum_l4 (for future use, PnC) */
  684. u8 reserved4; /* bm_qset (for future use, BM) */
  685. u8 reserved5;
  686. u16 reserved6; /* classify_info (for future use, PnC) */
  687. u32 reserved7; /* flow_id (for future use, PnC) */
  688. u32 reserved8;
  689. };
  690. /* Per-CPU Tx queue control */
  691. struct mvpp2_txq_pcpu {
  692. int cpu;
  693. /* Number of Tx DMA descriptors in the descriptor ring */
  694. int size;
  695. /* Number of currently used Tx DMA descriptor in the
  696. * descriptor ring
  697. */
  698. int count;
  699. /* Number of Tx DMA descriptors reserved for each CPU */
  700. int reserved_num;
  701. /* Array of transmitted skb */
  702. struct sk_buff **tx_skb;
  703. /* Array of transmitted buffers' physical addresses */
  704. dma_addr_t *tx_buffs;
  705. /* Index of last TX DMA descriptor that was inserted */
  706. int txq_put_index;
  707. /* Index of the TX DMA descriptor to be cleaned up */
  708. int txq_get_index;
  709. };
  710. struct mvpp2_tx_queue {
  711. /* Physical number of this Tx queue */
  712. u8 id;
  713. /* Logical number of this Tx queue */
  714. u8 log_id;
  715. /* Number of Tx DMA descriptors in the descriptor ring */
  716. int size;
717. /* Number of currently used Tx DMA descriptors in the descriptor ring */
  718. int count;
  719. /* Per-CPU control of physical Tx queues */
  720. struct mvpp2_txq_pcpu __percpu *pcpu;
  721. /* Array of transmitted skb */
  722. struct sk_buff **tx_skb;
  723. u32 done_pkts_coal;
724. /* Virtual address of the Tx DMA descriptors array */
  725. struct mvpp2_tx_desc *descs;
  726. /* DMA address of the Tx DMA descriptors array */
  727. dma_addr_t descs_phys;
  728. /* Index of the last Tx DMA descriptor */
  729. int last_desc;
  730. /* Index of the next Tx DMA descriptor to process */
  731. int next_desc_to_proc;
  732. };
  733. struct mvpp2_rx_queue {
  734. /* RX queue number, in the range 0-31 for physical RXQs */
  735. u8 id;
  736. /* Num of rx descriptors in the rx descriptor ring */
  737. int size;
  738. u32 pkts_coal;
  739. u32 time_coal;
  740. /* Virtual address of the RX DMA descriptors array */
  741. struct mvpp2_rx_desc *descs;
  742. /* DMA address of the RX DMA descriptors array */
  743. dma_addr_t descs_phys;
  744. /* Index of the last RX DMA descriptor */
  745. int last_desc;
  746. /* Index of the next RX DMA descriptor to process */
  747. int next_desc_to_proc;
  748. /* ID of port to which physical RXQ is mapped */
  749. int port;
  750. /* Port's logic RXQ number to which physical RXQ is mapped */
  751. int logic_rxq;
  752. };
  753. union mvpp2_prs_tcam_entry {
  754. u32 word[MVPP2_PRS_TCAM_WORDS];
  755. u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
  756. };
  757. union mvpp2_prs_sram_entry {
  758. u32 word[MVPP2_PRS_SRAM_WORDS];
  759. u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
  760. };
  761. struct mvpp2_prs_entry {
  762. u32 index;
  763. union mvpp2_prs_tcam_entry tcam;
  764. union mvpp2_prs_sram_entry sram;
  765. };
  766. struct mvpp2_prs_shadow {
  767. bool valid;
  768. bool finish;
  769. /* Lookup ID */
  770. int lu;
  771. /* User defined offset */
  772. int udf;
  773. /* Result info */
  774. u32 ri;
  775. u32 ri_mask;
  776. };
  777. struct mvpp2_cls_flow_entry {
  778. u32 index;
  779. u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
  780. };
  781. struct mvpp2_cls_lookup_entry {
  782. u32 lkpid;
  783. u32 way;
  784. u32 data;
  785. };
  786. struct mvpp2_bm_pool {
  787. /* Pool number in the range 0-7 */
  788. int id;
  789. enum mvpp2_bm_type type;
  790. /* Buffer Pointers Pool External (BPPE) size */
  791. int size;
  792. /* Number of buffers for this pool */
  793. int buf_num;
  794. /* Pool buffer size */
  795. int buf_size;
  796. /* Packet size */
  797. int pkt_size;
  798. /* BPPE virtual base address */
  799. u32 *virt_addr;
  800. /* BPPE physical base address */
  801. dma_addr_t phys_addr;
  802. /* Ports using BM pool */
  803. u32 port_map;
  804. /* Occupied buffers indicator */
  805. atomic_t in_use;
  806. int in_use_thresh;
  807. };
  808. struct mvpp2_buff_hdr {
  809. u32 next_buff_phys_addr;
  810. u32 next_buff_virt_addr;
  811. u16 byte_count;
  812. u16 info;
  813. u8 reserved1; /* bm_qset (for future use, BM) */
  814. };
  815. /* Buffer header info bits */
  816. #define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
  817. #define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
  818. #define MVPP2_B_HDR_INFO_LAST_OFFS 12
  819. #define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
820. #define MVPP2_B_HDR_INFO_IS_LAST(info) \
821. (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
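/* Annotation (editor's example, not driver code): for info == 0x1234,
 * MVPP2_B_HDR_INFO_MC_ID(0x1234) = 0x1234 & 0xfff = 0x234, and
 * MVPP2_B_HDR_INFO_IS_LAST(0x1234) isolates bit 12 (0x1000) and shifts it
 * down to yield 1, i.e. this is the last buffer of the packet.
 */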
822. /* Static declarations */
  823. /* Number of RXQs used by single port */
  824. static int rxq_number = MVPP2_DEFAULT_RXQ;
  825. /* Number of TXQs used by single port */
  826. static int txq_number = MVPP2_MAX_TXQ;
  827. #define MVPP2_DRIVER_NAME "mvpp2"
  828. #define MVPP2_DRIVER_VERSION "1.0"
  829. /* Utility/helper methods */
  830. static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
  831. {
  832. writel(data, priv->base + offset);
  833. }
  834. static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
  835. {
  836. return readl(priv->base + offset);
  837. }
  838. static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
  839. {
  840. txq_pcpu->txq_get_index++;
  841. if (txq_pcpu->txq_get_index == txq_pcpu->size)
  842. txq_pcpu->txq_get_index = 0;
  843. }
  844. static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
  845. struct sk_buff *skb,
  846. struct mvpp2_tx_desc *tx_desc)
  847. {
  848. txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
  849. if (skb)
  850. txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
  851. tx_desc->buf_phys_addr;
  852. txq_pcpu->txq_put_index++;
  853. if (txq_pcpu->txq_put_index == txq_pcpu->size)
  854. txq_pcpu->txq_put_index = 0;
  855. }
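/* Annotation (editor's note, not driver code): txq_put_index/txq_get_index
 * form a classic producer/consumer ring. E.g. with size == 4 and
 * txq_put_index == 3, mvpp2_txq_inc_put() stores the skb in slot 3 and
 * wraps the index back to 0; mvpp2_txq_inc_get() wraps the consumer side
 * the same way, so both indices always stay within [0, size).
 */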
  856. /* Get number of physical egress port */
  857. static inline int mvpp2_egress_port(struct mvpp2_port *port)
  858. {
  859. return MVPP2_MAX_TCONT + port->id;
  860. }
  861. /* Get number of physical TXQ */
  862. static inline int mvpp2_txq_phys(int port, int txq)
  863. {
  864. return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
  865. }
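/* Annotation (editor's example, not driver code): worked example of the
 * numbering above, assuming the usual values MVPP2_MAX_TCONT == 16 and
 * MVPP2_MAX_TXQ == 8. Port 2 is egress port 16 + 2 = 18, and its logical
 * TXQ 3 maps to physical TXQ (16 + 2) * 8 + 3 = 147.
 */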
  866. /* Parser configuration routines */
  867. /* Update parser tcam and sram hw entries */
  868. static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
  869. {
  870. int i;
  871. if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  872. return -EINVAL;
  873. /* Clear entry invalidation bit */
  874. pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
  875. /* Write tcam index - indirect access */
  876. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  877. for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  878. mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
  879. /* Write sram index - indirect access */
  880. mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  881. for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  882. mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
  883. return 0;
  884. }
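/* Annotation (editor's example, not driver code): a minimal usage sketch of
 * the indirect TCAM/SRAM access pattern above - build a sw entry, pick an
 * index, then push all words in one shot:
 *
 *	struct mvpp2_prs_entry pe;
 *
 *	memset(&pe, 0, sizeof(pe));
 *	pe.index = 5;	(any index below MVPP2_PRS_TCAM_SRAM_SIZE)
 *	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
 *	mvpp2_prs_hw_write(priv, &pe);
 *
 * mvpp2_prs_hw_write() first programs the IDX register, then the data
 * words, which is why no per-word addressing appears at the call sites.
 */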
  885. /* Read tcam entry from hw */
  886. static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
  887. {
  888. int i;
  889. if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  890. return -EINVAL;
  891. /* Write tcam index - indirect access */
  892. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  893. pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
  894. MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
  895. if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
  896. return MVPP2_PRS_TCAM_ENTRY_INVALID;
  897. for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  898. pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
  899. /* Write sram index - indirect access */
  900. mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  901. for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  902. pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
  903. return 0;
  904. }
  905. /* Invalidate tcam hw entry */
  906. static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
  907. {
  908. /* Write index - indirect access */
  909. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
  910. mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
  911. MVPP2_PRS_TCAM_INV_MASK);
  912. }
  913. /* Enable shadow table entry and set its lookup ID */
  914. static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
  915. {
  916. priv->prs_shadow[index].valid = true;
  917. priv->prs_shadow[index].lu = lu;
  918. }
  919. /* Update ri fields in shadow table entry */
  920. static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
  921. unsigned int ri, unsigned int ri_mask)
  922. {
  923. priv->prs_shadow[index].ri_mask = ri_mask;
  924. priv->prs_shadow[index].ri = ri;
  925. }
  926. /* Update lookup field in tcam sw entry */
  927. static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
  928. {
  929. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
  930. pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
  931. pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
  932. }
  933. /* Update mask for single port in tcam sw entry */
  934. static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
  935. unsigned int port, bool add)
  936. {
  937. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
  938. if (add)
  939. pe->tcam.byte[enable_off] &= ~(1 << port);
  940. else
  941. pe->tcam.byte[enable_off] |= 1 << port;
  942. }
  943. /* Update port map in tcam sw entry */
  944. static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
  945. unsigned int ports)
  946. {
  947. unsigned char port_mask = MVPP2_PRS_PORT_MASK;
  948. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
  949. pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
  950. pe->tcam.byte[enable_off] &= ~port_mask;
  951. pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
  952. }
  953. /* Obtain port map from tcam sw entry */
  954. static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
  955. {
  956. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
  957. return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
  958. }
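/* Annotation (editor's example, not driver code): TCAM port-enable bits are
 * active-low, which is why set and get both invert. Worked example with
 * MVPP2_PRS_PORT_MASK == 0xff: mvpp2_prs_tcam_port_map_set(pe, BIT(0) |
 * BIT(1)) stores ~0x03 & 0xff = 0xfc in the enable byte, and
 * mvpp2_prs_tcam_port_map_get() returns ~0xfc & 0xff = 0x03 - ports 0 and 1
 * again.
 */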
  959. /* Set byte of data and its enable bits in tcam sw entry */
  960. static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
  961. unsigned int offs, unsigned char byte,
  962. unsigned char enable)
  963. {
  964. pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
  965. pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
  966. }
  967. /* Get byte of data and its enable bits from tcam sw entry */
  968. static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
  969. unsigned int offs, unsigned char *byte,
  970. unsigned char *enable)
  971. {
  972. *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
  973. *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
  974. }
  975. /* Compare tcam data bytes with a pattern */
  976. static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
  977. u16 data)
  978. {
  979. int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
  980. u16 tcam_data;
981. tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
  982. if (tcam_data != data)
  983. return false;
  984. return true;
  985. }
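/* Annotation (editor's example, not driver code): worked example of the
 * 16-bit compare above. An ethertype is stored big-endian in the tcam data
 * bytes, so for 0x8100 byte[off] == 0x81 and byte[off + 1] == 0x00, giving
 * tcam_data = (0x00 << 8) | 0x81 = 0x0081 - which is why callers pass
 * swab16(tpid) as the pattern to match.
 */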
  986. /* Update ai bits in tcam sw entry */
  987. static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
  988. unsigned int bits, unsigned int enable)
  989. {
  990. int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
  991. for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
  992. if (!(enable & BIT(i)))
  993. continue;
  994. if (bits & BIT(i))
  995. pe->tcam.byte[ai_idx] |= 1 << i;
  996. else
  997. pe->tcam.byte[ai_idx] &= ~(1 << i);
  998. }
  999. pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
  1000. }
  1001. /* Get ai bits from tcam sw entry */
  1002. static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
  1003. {
  1004. return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
  1005. }
  1006. /* Set ethertype in tcam sw entry */
  1007. static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
  1008. unsigned short ethertype)
  1009. {
  1010. mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
  1011. mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
  1012. }
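/* Annotation (editor's example, not driver code): e.g.
 * mvpp2_prs_match_etype(pe, 0, ETH_P_IP) with ETH_P_IP == 0x0800 sets data
 * byte 0 to 0x08 and data byte 1 to 0x00, each with a full 0xff enable
 * mask, so the entry matches the ethertype exactly and in network
 * (big-endian) order.
 */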
  1013. /* Set bits in sram sw entry */
  1014. static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
  1015. int val)
  1016. {
  1017. pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
  1018. }
  1019. /* Clear bits in sram sw entry */
  1020. static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
  1021. int val)
  1022. {
  1023. pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
  1024. }
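/* Annotation (editor's example, not driver code): MVPP2_BIT_TO_BYTE()
 * arithmetic - setting bit 19 of the sram entry touches byte 19 / 8 = 2
 * with mask 1 << (19 % 8) = 0x08. Note that set/clear take a multi-bit val,
 * so mvpp2_prs_sram_bits_set(pe, 16, 0x3) sets bits 16 and 17 in one call,
 * provided val does not cross the byte boundary.
 */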
  1025. /* Update ri bits in sram sw entry */
  1026. static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
  1027. unsigned int bits, unsigned int mask)
  1028. {
  1029. unsigned int i;
  1030. for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
  1031. int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
  1032. if (!(mask & BIT(i)))
  1033. continue;
  1034. if (bits & BIT(i))
  1035. mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
  1036. else
  1037. mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
  1038. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
  1039. }
  1040. }
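/* Annotation (editor's example, not driver code): for every result-info bit
 * selected by mask, the loop above writes the data bit and also sets the
 * matching RI_CTRL (valid) bit. E.g. mvpp2_prs_sram_ri_update(pe,
 * MVPP2_PRS_RI_DROP_MASK, MVPP2_PRS_RI_DROP_MASK) marks the drop bit both
 * as '1' and as 'valid', leaving all other ri bits untouched.
 */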
  1041. /* Obtain ri bits from sram sw entry */
  1042. static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
  1043. {
  1044. return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
  1045. }
  1046. /* Update ai bits in sram sw entry */
  1047. static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
  1048. unsigned int bits, unsigned int mask)
  1049. {
  1050. unsigned int i;
  1051. int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
  1052. for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
  1053. if (!(mask & BIT(i)))
  1054. continue;
  1055. if (bits & BIT(i))
  1056. mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
  1057. else
  1058. mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
  1059. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
  1060. }
  1061. }
  1062. /* Read ai bits from sram sw entry */
  1063. static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
  1064. {
  1065. u8 bits;
  1066. int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
  1067. int ai_en_off = ai_off + 1;
  1068. int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
  1069. bits = (pe->sram.byte[ai_off] >> ai_shift) |
  1070. (pe->sram.byte[ai_en_off] << (8 - ai_shift));
  1071. return bits;
  1072. }
  1073. /* In sram sw entry set lookup ID field of the tcam key to be used in the next
1074. * lookup iteration
  1075. */
  1076. static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
  1077. unsigned int lu)
  1078. {
  1079. int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
  1080. mvpp2_prs_sram_bits_clear(pe, sram_next_off,
  1081. MVPP2_PRS_SRAM_NEXT_LU_MASK);
  1082. mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
  1083. }
  1084. /* In the sram sw entry set sign and value of the next lookup offset
  1085. * and the offset value generated to the classifier
  1086. */
  1087. static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
  1088. unsigned int op)
  1089. {
  1090. /* Set sign */
  1091. if (shift < 0) {
  1092. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
  1093. shift = 0 - shift;
  1094. } else {
  1095. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
  1096. }
  1097. /* Set value */
  1098. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
  1099. (unsigned char)shift;
  1100. /* Reset and set operation */
  1101. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
  1102. MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
  1103. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
  1104. /* Set base offset as current */
  1105. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
  1106. }
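/* Annotation (editor's example, not driver code): negative shifts are
 * stored as sign + magnitude. E.g. mvpp2_prs_sram_shift_set(pe, -18, op)
 * sets MVPP2_PRS_SRAM_SHIFT_SIGN_BIT and stores 18 in the shift value
 * byte - this is how the IPv6 multicast entry further below shifts back to
 * the NH field.
 */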
  1107. /* In the sram sw entry set sign and value of the user defined offset
  1108. * generated to the classifier
  1109. */
  1110. static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
  1111. unsigned int type, int offset,
  1112. unsigned int op)
  1113. {
  1114. /* Set sign */
  1115. if (offset < 0) {
  1116. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
  1117. offset = 0 - offset;
  1118. } else {
  1119. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
  1120. }
  1121. /* Set value */
  1122. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
  1123. MVPP2_PRS_SRAM_UDF_MASK);
  1124. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
  1125. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
  1126. MVPP2_PRS_SRAM_UDF_BITS)] &=
  1127. ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
  1128. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
  1129. MVPP2_PRS_SRAM_UDF_BITS)] |=
  1130. (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
  1131. /* Set offset type */
  1132. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
  1133. MVPP2_PRS_SRAM_UDF_TYPE_MASK);
  1134. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
  1135. /* Set offset operation */
  1136. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
  1137. MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
  1138. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
  1139. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
  1140. MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
  1141. ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
  1142. (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
  1143. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
  1144. MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
  1145. (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
  1146. /* Set base offset as current */
  1147. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
  1148. }
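/* Annotation (editor's note, not driver code): the byte-splitting above
 * handles a UDF offset field that is not byte aligned. Assuming the usual
 * field placement where MVPP2_PRS_SRAM_UDF_OFFS % 8 == 1, the low 7 bits of
 * 'offset' land in the first byte (via mvpp2_prs_sram_bits_set()) and the
 * remaining high bit is or'ed into the next byte as offset >> (8 - 1); the
 * preceding &= clears only that spill-over region first.
 */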
  1149. /* Find parser flow entry */
  1150. static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
  1151. {
  1152. struct mvpp2_prs_entry *pe;
  1153. int tid;
  1154. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  1155. if (!pe)
  1156. return NULL;
  1157. mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1158. /* Go through all entries with MVPP2_PRS_LU_FLOWS */
  1159. for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
  1160. u8 bits;
  1161. if (!priv->prs_shadow[tid].valid ||
  1162. priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
  1163. continue;
  1164. pe->index = tid;
  1165. mvpp2_prs_hw_read(priv, pe);
  1166. bits = mvpp2_prs_sram_ai_get(pe);
1167. /* Sram stores the classification lookup ID in AI bits [5:0] */
  1168. if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
  1169. return pe;
  1170. }
  1171. kfree(pe);
  1172. return NULL;
  1173. }
  1174. /* Return first free tcam index, seeking from start to end */
  1175. static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
  1176. unsigned char end)
  1177. {
  1178. int tid;
  1179. if (start > end)
  1180. swap(start, end);
  1181. if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
  1182. end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
  1183. for (tid = start; tid <= end; tid++) {
  1184. if (!priv->prs_shadow[tid].valid)
  1185. return tid;
  1186. }
  1187. return -EINVAL;
  1188. }
  1189. /* Enable/disable dropping all mac da's */
  1190. static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
  1191. {
  1192. struct mvpp2_prs_entry pe;
  1193. if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1194. /* Entry exists - update port only */
  1195. pe.index = MVPP2_PE_DROP_ALL;
  1196. mvpp2_prs_hw_read(priv, &pe);
  1197. } else {
  1198. /* Entry doesn't exist - create new */
  1199. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1200. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
  1201. pe.index = MVPP2_PE_DROP_ALL;
  1202. /* Non-promiscuous mode for all ports - DROP unknown packets */
  1203. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
  1204. MVPP2_PRS_RI_DROP_MASK);
  1205. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1206. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1207. /* Update shadow table */
  1208. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  1209. /* Mask all ports */
  1210. mvpp2_prs_tcam_port_map_set(&pe, 0);
  1211. }
  1212. /* Update port mask */
  1213. mvpp2_prs_tcam_port_set(&pe, port, add);
  1214. mvpp2_prs_hw_write(priv, &pe);
  1215. }
  1216. /* Set port to promiscuous mode */
  1217. static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
  1218. {
  1219. struct mvpp2_prs_entry pe;
  1220. /* Promiscuous mode - Accept unknown packets */
  1221. if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1222. /* Entry exists - update port only */
  1223. pe.index = MVPP2_PE_MAC_PROMISCUOUS;
  1224. mvpp2_prs_hw_read(priv, &pe);
  1225. } else {
  1226. /* Entry doesn't exist - create new */
  1227. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1228. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
  1229. pe.index = MVPP2_PE_MAC_PROMISCUOUS;
  1230. /* Continue - set next lookup */
  1231. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
  1232. /* Set result info bits */
  1233. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
  1234. MVPP2_PRS_RI_L2_CAST_MASK);
  1235. /* Shift to ethertype */
  1236. mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
  1237. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1238. /* Mask all ports */
  1239. mvpp2_prs_tcam_port_map_set(&pe, 0);
  1240. /* Update shadow table */
  1241. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  1242. }
  1243. /* Update port mask */
  1244. mvpp2_prs_tcam_port_set(&pe, port, add);
  1245. mvpp2_prs_hw_write(priv, &pe);
  1246. }
  1247. /* Accept multicast */
  1248. static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
  1249. bool add)
  1250. {
  1251. struct mvpp2_prs_entry pe;
  1252. unsigned char da_mc;
  1253. /* Ethernet multicast address first byte is
  1254. * 0x01 for IPv4 and 0x33 for IPv6
  1255. */
  1256. da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
  1257. if (priv->prs_shadow[index].valid) {
1258. /* Entry exists - update port only */
  1259. pe.index = index;
  1260. mvpp2_prs_hw_read(priv, &pe);
  1261. } else {
  1262. /* Entry doesn't exist - create new */
  1263. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1264. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
  1265. pe.index = index;
  1266. /* Continue - set next lookup */
  1267. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
  1268. /* Set result info bits */
  1269. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
  1270. MVPP2_PRS_RI_L2_CAST_MASK);
  1271. /* Update tcam entry data first byte */
  1272. mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
  1273. /* Shift to ethertype */
  1274. mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
  1275. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1276. /* Mask all ports */
  1277. mvpp2_prs_tcam_port_map_set(&pe, 0);
  1278. /* Update shadow table */
  1279. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  1280. }
  1281. /* Update port mask */
  1282. mvpp2_prs_tcam_port_set(&pe, port, add);
  1283. mvpp2_prs_hw_write(priv, &pe);
  1284. }
  1285. /* Set entry for dsa packets */
  1286. static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
  1287. bool tagged, bool extend)
  1288. {
  1289. struct mvpp2_prs_entry pe;
  1290. int tid, shift;
  1291. if (extend) {
  1292. tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
  1293. shift = 8;
  1294. } else {
  1295. tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
  1296. shift = 4;
  1297. }
  1298. if (priv->prs_shadow[tid].valid) {
1299. /* Entry exists - update port only */
  1300. pe.index = tid;
  1301. mvpp2_prs_hw_read(priv, &pe);
  1302. } else {
  1303. /* Entry doesn't exist - create new */
  1304. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1305. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
  1306. pe.index = tid;
1307. /* Shift 4 bytes for a DSA tag or 8 bytes for an EDSA tag */
  1308. mvpp2_prs_sram_shift_set(&pe, shift,
  1309. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1310. /* Update shadow table */
  1311. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
  1312. if (tagged) {
  1313. /* Set tagged bit in DSA tag */
  1314. mvpp2_prs_tcam_data_byte_set(&pe, 0,
  1315. MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
  1316. MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
  1317. /* Clear all ai bits for next iteration */
  1318. mvpp2_prs_sram_ai_update(&pe, 0,
  1319. MVPP2_PRS_SRAM_AI_MASK);
  1320. /* If packet is tagged continue check vlans */
  1321. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  1322. } else {
  1323. /* Set result info bits to 'no vlans' */
  1324. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
  1325. MVPP2_PRS_RI_VLAN_MASK);
  1326. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  1327. }
  1328. /* Mask all ports */
  1329. mvpp2_prs_tcam_port_map_set(&pe, 0);
  1330. }
  1331. /* Update port mask */
  1332. mvpp2_prs_tcam_port_set(&pe, port, add);
  1333. mvpp2_prs_hw_write(priv, &pe);
  1334. }
  1335. /* Set entry for dsa ethertype */
  1336. static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
  1337. bool add, bool tagged, bool extend)
  1338. {
  1339. struct mvpp2_prs_entry pe;
  1340. int tid, shift, port_mask;
  1341. if (extend) {
  1342. tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
  1343. MVPP2_PE_ETYPE_EDSA_UNTAGGED;
  1344. port_mask = 0;
  1345. shift = 8;
  1346. } else {
  1347. tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
  1348. MVPP2_PE_ETYPE_DSA_UNTAGGED;
  1349. port_mask = MVPP2_PRS_PORT_MASK;
  1350. shift = 4;
  1351. }
  1352. if (priv->prs_shadow[tid].valid) {
1353. /* Entry exists - update port only */
  1354. pe.index = tid;
  1355. mvpp2_prs_hw_read(priv, &pe);
  1356. } else {
  1357. /* Entry doesn't exist - create new */
  1358. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1359. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
  1360. pe.index = tid;
  1361. /* Set ethertype */
  1362. mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
  1363. mvpp2_prs_match_etype(&pe, 2, 0);
  1364. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
  1365. MVPP2_PRS_RI_DSA_MASK);
1366. /* Shift past ethertype + 2 reserved bytes + tag */
  1367. mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
  1368. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1369. /* Update shadow table */
  1370. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
  1371. if (tagged) {
  1372. /* Set tagged bit in DSA tag */
  1373. mvpp2_prs_tcam_data_byte_set(&pe,
  1374. MVPP2_ETH_TYPE_LEN + 2 + 3,
  1375. MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
  1376. MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
  1377. /* Clear all ai bits for next iteration */
  1378. mvpp2_prs_sram_ai_update(&pe, 0,
  1379. MVPP2_PRS_SRAM_AI_MASK);
  1380. /* If packet is tagged continue check vlans */
  1381. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  1382. } else {
  1383. /* Set result info bits to 'no vlans' */
  1384. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
  1385. MVPP2_PRS_RI_VLAN_MASK);
  1386. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  1387. }
  1388. /* Mask/unmask all ports, depending on dsa type */
  1389. mvpp2_prs_tcam_port_map_set(&pe, port_mask);
  1390. }
  1391. /* Update port mask */
  1392. mvpp2_prs_tcam_port_set(&pe, port, add);
  1393. mvpp2_prs_hw_write(priv, &pe);
  1394. }
  1395. /* Search for existing single/triple vlan entry */
  1396. static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
  1397. unsigned short tpid, int ai)
  1398. {
  1399. struct mvpp2_prs_entry *pe;
  1400. int tid;
  1401. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  1402. if (!pe)
  1403. return NULL;
  1404. mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1405. /* Go through all entries with MVPP2_PRS_LU_VLAN */
  1406. for (tid = MVPP2_PE_FIRST_FREE_TID;
  1407. tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
  1408. unsigned int ri_bits, ai_bits;
  1409. bool match;
  1410. if (!priv->prs_shadow[tid].valid ||
  1411. priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
  1412. continue;
  1413. pe->index = tid;
  1414. mvpp2_prs_hw_read(priv, pe);
  1415. match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
  1416. if (!match)
  1417. continue;
  1418. /* Get vlan type */
  1419. ri_bits = mvpp2_prs_sram_ri_get(pe);
  1420. ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
  1421. /* Get current ai value from tcam */
  1422. ai_bits = mvpp2_prs_tcam_ai_get(pe);
  1423. /* Clear double vlan bit */
  1424. ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
  1425. if (ai != ai_bits)
  1426. continue;
  1427. if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
  1428. ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
  1429. return pe;
  1430. }
  1431. kfree(pe);
  1432. return NULL;
  1433. }
  1434. /* Add/update single/triple vlan entry */
  1435. static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
  1436. unsigned int port_map)
  1437. {
  1438. struct mvpp2_prs_entry *pe;
  1439. int tid_aux, tid;
  1440. int ret = 0;
  1441. pe = mvpp2_prs_vlan_find(priv, tpid, ai);
  1442. if (!pe) {
  1443. /* Create new tcam entry */
  1444. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
  1445. MVPP2_PE_FIRST_FREE_TID);
  1446. if (tid < 0)
  1447. return tid;
  1448. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  1449. if (!pe)
  1450. return -ENOMEM;
  1451. /* Get last double vlan tid */
  1452. for (tid_aux = MVPP2_PE_LAST_FREE_TID;
  1453. tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
  1454. unsigned int ri_bits;
  1455. if (!priv->prs_shadow[tid_aux].valid ||
  1456. priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
  1457. continue;
  1458. pe->index = tid_aux;
  1459. mvpp2_prs_hw_read(priv, pe);
  1460. ri_bits = mvpp2_prs_sram_ri_get(pe);
  1461. if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
  1462. MVPP2_PRS_RI_VLAN_DOUBLE)
  1463. break;
  1464. }
  1465. if (tid <= tid_aux) {
  1466. ret = -EINVAL;
  1467. goto error;
  1468. }
1469. memset(pe, 0, sizeof(struct mvpp2_prs_entry));
  1470. mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
  1471. pe->index = tid;
  1472. mvpp2_prs_match_etype(pe, 0, tpid);
  1473. mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
  1474. /* Shift 4 bytes - skip 1 vlan tag */
  1475. mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
  1476. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1477. /* Clear all ai bits for next iteration */
  1478. mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
  1479. if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
  1480. mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
  1481. MVPP2_PRS_RI_VLAN_MASK);
  1482. } else {
  1483. ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
  1484. mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
  1485. MVPP2_PRS_RI_VLAN_MASK);
  1486. }
  1487. mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
  1488. mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
  1489. }
  1490. /* Update ports' mask */
  1491. mvpp2_prs_tcam_port_map_set(pe, port_map);
  1492. mvpp2_prs_hw_write(priv, pe);
  1493. error:
  1494. kfree(pe);
  1495. return ret;
  1496. }
  1497. /* Get first free double vlan ai number */
  1498. static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
  1499. {
  1500. int i;
  1501. for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
  1502. if (!priv->prs_double_vlans[i])
  1503. return i;
  1504. }
  1505. return -EINVAL;
  1506. }
  1507. /* Search for existing double vlan entry */
  1508. static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
  1509. unsigned short tpid1,
  1510. unsigned short tpid2)
  1511. {
  1512. struct mvpp2_prs_entry *pe;
  1513. int tid;
  1514. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  1515. if (!pe)
  1516. return NULL;
  1517. mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1518. /* Go through all entries with MVPP2_PRS_LU_VLAN */
  1519. for (tid = MVPP2_PE_FIRST_FREE_TID;
  1520. tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
  1521. unsigned int ri_mask;
  1522. bool match;
  1523. if (!priv->prs_shadow[tid].valid ||
  1524. priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
  1525. continue;
  1526. pe->index = tid;
  1527. mvpp2_prs_hw_read(priv, pe);
1528. match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) &&
1529. mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
  1530. if (!match)
  1531. continue;
  1532. ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
  1533. if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
  1534. return pe;
  1535. }
  1536. kfree(pe);
  1537. return NULL;
  1538. }
  1539. /* Add or update double vlan entry */
  1540. static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
  1541. unsigned short tpid2,
  1542. unsigned int port_map)
  1543. {
  1544. struct mvpp2_prs_entry *pe;
  1545. int tid_aux, tid, ai, ret = 0;
  1546. pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
  1547. if (!pe) {
  1548. /* Create new tcam entry */
  1549. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1550. MVPP2_PE_LAST_FREE_TID);
  1551. if (tid < 0)
  1552. return tid;
  1553. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  1554. if (!pe)
  1555. return -ENOMEM;
  1556. /* Set ai value for new double vlan entry */
  1557. ai = mvpp2_prs_double_vlan_ai_free_get(priv);
  1558. if (ai < 0) {
  1559. ret = ai;
  1560. goto error;
  1561. }
  1562. /* Get first single/triple vlan tid */
  1563. for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
  1564. tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
  1565. unsigned int ri_bits;
  1566. if (!priv->prs_shadow[tid_aux].valid ||
  1567. priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
  1568. continue;
  1569. pe->index = tid_aux;
  1570. mvpp2_prs_hw_read(priv, pe);
  1571. ri_bits = mvpp2_prs_sram_ri_get(pe);
  1572. ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
  1573. if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
  1574. ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
  1575. break;
  1576. }
  1577. if (tid >= tid_aux) {
  1578. ret = -ERANGE;
  1579. goto error;
  1580. }
  1581. memset(pe, 0, sizeof(struct mvpp2_prs_entry));
  1582. mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
  1583. pe->index = tid;
  1584. priv->prs_double_vlans[ai] = true;
  1585. mvpp2_prs_match_etype(pe, 0, tpid1);
  1586. mvpp2_prs_match_etype(pe, 4, tpid2);
  1587. mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
  1588. /* Shift 8 bytes - skip 2 vlan tags */
  1589. mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
  1590. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1591. mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
  1592. MVPP2_PRS_RI_VLAN_MASK);
  1593. mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
  1594. MVPP2_PRS_SRAM_AI_MASK);
  1595. mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
  1596. }
  1597. /* Update ports' mask */
  1598. mvpp2_prs_tcam_port_map_set(pe, port_map);
  1599. mvpp2_prs_hw_write(priv, pe);
  1600. error:
  1601. kfree(pe);
  1602. return ret;
  1603. }
  1604. /* IPv4 header parsing for fragmentation and L4 offset */
  1605. static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
  1606. unsigned int ri, unsigned int ri_mask)
  1607. {
  1608. struct mvpp2_prs_entry pe;
  1609. int tid;
  1610. if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
  1611. (proto != IPPROTO_IGMP))
  1612. return -EINVAL;
  1613. /* Fragmented packet */
  1614. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1615. MVPP2_PE_LAST_FREE_TID);
  1616. if (tid < 0)
  1617. return tid;
  1618. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1619. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
  1620. pe.index = tid;
  1621. /* Set next lu to IPv4 */
  1622. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
  1623. mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1624. /* Set L4 offset */
  1625. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
  1626. sizeof(struct iphdr) - 4,
  1627. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  1628. mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
  1629. MVPP2_PRS_IPV4_DIP_AI_BIT);
  1630. mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
  1631. ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
  1632. mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
  1633. mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
  1634. /* Unmask all ports */
  1635. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1636. /* Update shadow table and hw entry */
  1637. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
  1638. mvpp2_prs_hw_write(priv, &pe);
  1639. /* Not fragmented packet */
  1640. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1641. MVPP2_PE_LAST_FREE_TID);
  1642. if (tid < 0)
  1643. return tid;
  1644. pe.index = tid;
  1645. /* Clear ri before updating */
  1646. pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
  1647. pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
  1648. mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
  1649. mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
  1650. mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
  1651. /* Update shadow table and hw entry */
  1652. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
  1653. mvpp2_prs_hw_write(priv, &pe);
  1654. return 0;
  1655. }
  1656. /* IPv4 L3 multicast or broadcast */
  1657. static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
  1658. {
  1659. struct mvpp2_prs_entry pe;
  1660. int mask, tid;
  1661. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1662. MVPP2_PE_LAST_FREE_TID);
  1663. if (tid < 0)
  1664. return tid;
  1665. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1666. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
  1667. pe.index = tid;
  1668. switch (l3_cast) {
  1669. case MVPP2_PRS_L3_MULTI_CAST:
  1670. mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
  1671. MVPP2_PRS_IPV4_MC_MASK);
  1672. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
  1673. MVPP2_PRS_RI_L3_ADDR_MASK);
  1674. break;
  1675. case MVPP2_PRS_L3_BROAD_CAST:
  1676. mask = MVPP2_PRS_IPV4_BC_MASK;
  1677. mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
  1678. mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
  1679. mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
  1680. mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
  1681. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
  1682. MVPP2_PRS_RI_L3_ADDR_MASK);
  1683. break;
  1684. default:
  1685. return -EINVAL;
  1686. }
  1687. /* Finished: go to flowid generation */
  1688. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1689. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1690. mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
  1691. MVPP2_PRS_IPV4_DIP_AI_BIT);
  1692. /* Unmask all ports */
  1693. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1694. /* Update shadow table and hw entry */
  1695. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
  1696. mvpp2_prs_hw_write(priv, &pe);
  1697. return 0;
  1698. }
  1699. /* Set entries for protocols over IPv6 */
  1700. static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
  1701. unsigned int ri, unsigned int ri_mask)
  1702. {
  1703. struct mvpp2_prs_entry pe;
  1704. int tid;
  1705. if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
  1706. (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
  1707. return -EINVAL;
  1708. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1709. MVPP2_PE_LAST_FREE_TID);
  1710. if (tid < 0)
  1711. return tid;
  1712. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1713. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
  1714. pe.index = tid;
  1715. /* Finished: go to flowid generation */
  1716. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1717. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1718. mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
  1719. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
  1720. sizeof(struct ipv6hdr) - 6,
  1721. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  1722. mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
  1723. mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
  1724. MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
  1725. /* Unmask all ports */
  1726. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1727. /* Write HW */
  1728. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
  1729. mvpp2_prs_hw_write(priv, &pe);
  1730. return 0;
  1731. }
  1732. /* IPv6 L3 multicast entry */
  1733. static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
  1734. {
  1735. struct mvpp2_prs_entry pe;
  1736. int tid;
  1737. if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
  1738. return -EINVAL;
  1739. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1740. MVPP2_PE_LAST_FREE_TID);
  1741. if (tid < 0)
  1742. return tid;
  1743. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1744. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
  1745. pe.index = tid;
1746. /* Continue - next lookup is IPv6 again (shift back to NH below) */
  1747. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
  1748. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
  1749. MVPP2_PRS_RI_L3_ADDR_MASK);
  1750. mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
  1751. MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
  1752. /* Shift back to IPv6 NH */
  1753. mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1754. mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
  1755. MVPP2_PRS_IPV6_MC_MASK);
  1756. mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
  1757. /* Unmask all ports */
  1758. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1759. /* Update shadow table and hw entry */
  1760. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
  1761. mvpp2_prs_hw_write(priv, &pe);
  1762. return 0;
  1763. }
  1764. /* Parser per-port initialization */
  1765. static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
  1766. int lu_max, int offset)
  1767. {
  1768. u32 val;
  1769. /* Set lookup ID */
  1770. val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
  1771. val &= ~MVPP2_PRS_PORT_LU_MASK(port);
  1772. val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
  1773. mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
  1774. /* Set maximum number of loops for packet received from port */
  1775. val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
  1776. val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
  1777. val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
  1778. mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
  1779. /* Set initial offset for packet header extraction for the first
  1780. * searching loop
  1781. */
  1782. val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
  1783. val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
  1784. val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
  1785. mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
  1786. }
  1787. /* Default flow entries initialization for all ports */
  1788. static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
  1789. {
  1790. struct mvpp2_prs_entry pe;
  1791. int port;
  1792. for (port = 0; port < MVPP2_MAX_PORTS; port++) {
  1793. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1794. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1795. pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
  1796. /* Mask all ports */
  1797. mvpp2_prs_tcam_port_map_set(&pe, 0);
1798. /* Set flow ID */
  1799. mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
  1800. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
  1801. /* Update shadow table and hw entry */
  1802. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
  1803. mvpp2_prs_hw_write(priv, &pe);
  1804. }
  1805. }
  1806. /* Set default entry for Marvell Header field */
  1807. static void mvpp2_prs_mh_init(struct mvpp2 *priv)
  1808. {
  1809. struct mvpp2_prs_entry pe;
  1810. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1811. pe.index = MVPP2_PE_MH_DEFAULT;
  1812. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
  1813. mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
  1814. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1815. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
  1816. /* Unmask all ports */
  1817. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1818. /* Update shadow table and hw entry */
  1819. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
  1820. mvpp2_prs_hw_write(priv, &pe);
  1821. }
1822. /* Set default entries (place holders) for promiscuous, non-promiscuous and
  1823. * multicast MAC addresses
  1824. */
  1825. static void mvpp2_prs_mac_init(struct mvpp2 *priv)
  1826. {
  1827. struct mvpp2_prs_entry pe;
  1828. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1829. /* Non-promiscuous mode for all ports - DROP unknown packets */
  1830. pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
  1831. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
  1832. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
  1833. MVPP2_PRS_RI_DROP_MASK);
  1834. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1835. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1836. /* Unmask all ports */
  1837. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1838. /* Update shadow table and hw entry */
  1839. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  1840. mvpp2_prs_hw_write(priv, &pe);
  1841. /* place holders only - no ports */
  1842. mvpp2_prs_mac_drop_all_set(priv, 0, false);
  1843. mvpp2_prs_mac_promisc_set(priv, 0, false);
  1844. mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
  1845. mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
  1846. }
  1847. /* Set default entries for various types of dsa packets */
  1848. static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
  1849. {
  1850. struct mvpp2_prs_entry pe;
  1851. /* None tagged EDSA entry - place holder */
  1852. mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
  1853. MVPP2_PRS_EDSA);
  1854. /* Tagged EDSA entry - place holder */
  1855. mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
  1856. /* None tagged DSA entry - place holder */
  1857. mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
  1858. MVPP2_PRS_DSA);
  1859. /* Tagged DSA entry - place holder */
  1860. mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
1861. /* None tagged EDSA ethertype entry - place holder */
  1862. mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
  1863. MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
1864. /* Tagged EDSA ethertype entry - place holder */
  1865. mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
  1866. MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
  1867. /* None tagged DSA ethertype entry */
  1868. mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
  1869. MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
  1870. /* Tagged DSA ethertype entry */
  1871. mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
  1872. MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
  1873. /* Set default entry, in case DSA or EDSA tag not found */
  1874. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1875. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
  1876. pe.index = MVPP2_PE_DSA_DEFAULT;
  1877. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  1878. /* Shift 0 bytes */
  1879. mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1880. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  1881. /* Clear all sram ai bits for next iteration */
  1882. mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
  1883. /* Unmask all ports */
  1884. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1885. mvpp2_prs_hw_write(priv, &pe);
  1886. }
  1887. /* Match basic ethertypes */
  1888. static int mvpp2_prs_etype_init(struct mvpp2 *priv)
  1889. {
  1890. struct mvpp2_prs_entry pe;
  1891. int tid;
  1892. /* Ethertype: PPPoE */
  1893. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1894. MVPP2_PE_LAST_FREE_TID);
  1895. if (tid < 0)
  1896. return tid;
  1897. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1898. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  1899. pe.index = tid;
  1900. mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
  1901. mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
  1902. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1903. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
  1904. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
  1905. MVPP2_PRS_RI_PPPOE_MASK);
  1906. /* Update shadow table and hw entry */
  1907. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  1908. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  1909. priv->prs_shadow[pe.index].finish = false;
  1910. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
  1911. MVPP2_PRS_RI_PPPOE_MASK);
  1912. mvpp2_prs_hw_write(priv, &pe);
  1913. /* Ethertype: ARP */
  1914. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1915. MVPP2_PE_LAST_FREE_TID);
  1916. if (tid < 0)
  1917. return tid;
  1918. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1919. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  1920. pe.index = tid;
  1921. mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
1922. /* Generate flow in the next iteration */
  1923. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1924. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1925. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
  1926. MVPP2_PRS_RI_L3_PROTO_MASK);
  1927. /* Set L3 offset */
  1928. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
  1929. MVPP2_ETH_TYPE_LEN,
  1930. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  1931. /* Update shadow table and hw entry */
  1932. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  1933. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  1934. priv->prs_shadow[pe.index].finish = true;
  1935. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
  1936. MVPP2_PRS_RI_L3_PROTO_MASK);
  1937. mvpp2_prs_hw_write(priv, &pe);
  1938. /* Ethertype: LBTD */
  1939. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1940. MVPP2_PE_LAST_FREE_TID);
  1941. if (tid < 0)
  1942. return tid;
  1943. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1944. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  1945. pe.index = tid;
  1946. mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
1947. /* Generate flow in the next iteration */
  1948. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1949. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  1950. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
  1951. MVPP2_PRS_RI_UDF3_RX_SPECIAL,
  1952. MVPP2_PRS_RI_CPU_CODE_MASK |
  1953. MVPP2_PRS_RI_UDF3_MASK);
  1954. /* Set L3 offset */
  1955. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
  1956. MVPP2_ETH_TYPE_LEN,
  1957. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  1958. /* Update shadow table and hw entry */
  1959. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  1960. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  1961. priv->prs_shadow[pe.index].finish = true;
  1962. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
  1963. MVPP2_PRS_RI_UDF3_RX_SPECIAL,
  1964. MVPP2_PRS_RI_CPU_CODE_MASK |
  1965. MVPP2_PRS_RI_UDF3_MASK);
  1966. mvpp2_prs_hw_write(priv, &pe);
  1967. /* Ethertype: IPv4 without options */
  1968. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1969. MVPP2_PE_LAST_FREE_TID);
  1970. if (tid < 0)
  1971. return tid;
  1972. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  1973. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  1974. pe.index = tid;
  1975. mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
  1976. mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
  1977. MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
  1978. MVPP2_PRS_IPV4_HEAD_MASK |
  1979. MVPP2_PRS_IPV4_IHL_MASK);
  1980. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
  1981. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
  1982. MVPP2_PRS_RI_L3_PROTO_MASK);
  1983. /* Skip eth_type + 4 bytes of IP header */
  1984. mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
  1985. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1986. /* Set L3 offset */
  1987. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
  1988. MVPP2_ETH_TYPE_LEN,
  1989. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  1990. /* Update shadow table and hw entry */
  1991. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  1992. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  1993. priv->prs_shadow[pe.index].finish = false;
  1994. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
  1995. MVPP2_PRS_RI_L3_PROTO_MASK);
  1996. mvpp2_prs_hw_write(priv, &pe);
  1997. /* Ethertype: IPv4 with options */
  1998. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  1999. MVPP2_PE_LAST_FREE_TID);
  2000. if (tid < 0)
  2001. return tid;
  2002. pe.index = tid;
  2003. /* Clear tcam data before updating */
  2004. pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
  2005. pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
  2006. mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
  2007. MVPP2_PRS_IPV4_HEAD,
  2008. MVPP2_PRS_IPV4_HEAD_MASK);
  2009. /* Clear ri before updating */
  2010. pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
  2011. pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
  2012. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
  2013. MVPP2_PRS_RI_L3_PROTO_MASK);
  2014. /* Update shadow table and hw entry */
  2015. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  2016. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  2017. priv->prs_shadow[pe.index].finish = false;
  2018. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
  2019. MVPP2_PRS_RI_L3_PROTO_MASK);
  2020. mvpp2_prs_hw_write(priv, &pe);
  2021. /* Ethertype: IPv6 without options */
  2022. tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
  2023. MVPP2_PE_LAST_FREE_TID);
  2024. if (tid < 0)
  2025. return tid;
  2026. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  2027. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  2028. pe.index = tid;
  2029. mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
  2030. /* Skip DIP of IPV6 header */
  2031. mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
  2032. MVPP2_MAX_L3_ADDR_SIZE,
  2033. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  2034. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
  2035. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
  2036. MVPP2_PRS_RI_L3_PROTO_MASK);
  2037. /* Set L3 offset */
  2038. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
  2039. MVPP2_ETH_TYPE_LEN,
  2040. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  2041. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  2042. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  2043. priv->prs_shadow[pe.index].finish = false;
  2044. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
  2045. MVPP2_PRS_RI_L3_PROTO_MASK);
  2046. mvpp2_prs_hw_write(priv, &pe);
  2047. /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
  2048. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  2049. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
  2050. pe.index = MVPP2_PE_ETH_TYPE_UN;
  2051. /* Unmask all ports */
  2052. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2053. /* Generate flow in the next iteration */
  2054. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  2055. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  2056. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
  2057. MVPP2_PRS_RI_L3_PROTO_MASK);
2058. /* Set L3 offset even if it's an unknown L3 */
  2059. mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
  2060. MVPP2_ETH_TYPE_LEN,
  2061. MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
  2062. /* Update shadow table and hw entry */
  2063. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
  2064. priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
  2065. priv->prs_shadow[pe.index].finish = true;
  2066. mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
  2067. MVPP2_PRS_RI_L3_PROTO_MASK);
  2068. mvpp2_prs_hw_write(priv, &pe);
  2069. return 0;
  2070. }
  2071. /* Configure vlan entries and detect up to 2 successive VLAN tags.
  2072. * Possible options:
  2073. * 0x8100, 0x88A8
  2074. * 0x8100, 0x8100
  2075. * 0x8100
  2076. * 0x88A8
  2077. */
  2078. static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
  2079. {
  2080. struct mvpp2_prs_entry pe;
  2081. int err;
2082. priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
2083. MVPP2_PRS_DBL_VLANS_MAX,
2084. sizeof(bool), GFP_KERNEL);
  2085. if (!priv->prs_double_vlans)
  2086. return -ENOMEM;
  2087. /* Double VLAN: 0x8100, 0x88A8 */
  2088. err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
  2089. MVPP2_PRS_PORT_MASK);
  2090. if (err)
  2091. return err;
  2092. /* Double VLAN: 0x8100, 0x8100 */
  2093. err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
  2094. MVPP2_PRS_PORT_MASK);
  2095. if (err)
  2096. return err;
  2097. /* Single VLAN: 0x88a8 */
  2098. err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
  2099. MVPP2_PRS_PORT_MASK);
  2100. if (err)
  2101. return err;
  2102. /* Single VLAN: 0x8100 */
  2103. err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
  2104. MVPP2_PRS_PORT_MASK);
  2105. if (err)
  2106. return err;
  2107. /* Set default double vlan entry */
  2108. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  2109. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  2110. pe.index = MVPP2_PE_VLAN_DBL;
  2111. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  2112. /* Clear ai for next iterations */
  2113. mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
  2114. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
  2115. MVPP2_PRS_RI_VLAN_MASK);
  2116. mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
  2117. MVPP2_PRS_DBL_VLAN_AI_BIT);
  2118. /* Unmask all ports */
  2119. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  2120. /* Update shadow table and hw entry */
  2121. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
  2122. mvpp2_prs_hw_write(priv, &pe);
  2123. /* Set default vlan none entry */
  2124. memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
  2125. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  2126. pe.index = MVPP2_PE_VLAN_NONE;
  2127. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  2128. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
  2129. MVPP2_PRS_RI_VLAN_MASK);
  2130. /* Unmask all ports */
  2131. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  2132. /* Update shadow table and hw entry */
  2133. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
  2134. mvpp2_prs_hw_write(priv, &pe);
  2135. return 0;
  2136. }
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 as the last header; this is handled like the TCP (6) and
	 * UDP (17) cases. Result Info: UDF7=1, DS lite
	 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relative to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}
/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);
	return NULL;
}
/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
			     MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}
static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}
/* Delete all port's multicast simple (not range) entries */
static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
{
	struct mvpp2_prs_entry pe;
	int index, tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		/* Only simple mac entries */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
			/* Delete this entry */
			mvpp2_prs_mac_da_accept(priv, port, da, false);
	}
}
static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
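/* Illustrative usage only (a hypothetical caller sketch, not a call site
 * copied from this file): a port that uses no switch tagging could be
 * set up as
 *
 *	err = mvpp2_prs_tag_mode_set(port->priv, port->id,
 *				     MVPP2_TAG_TYPE_MH);
 *
 * which takes the MVPP2_TAG_TYPE_MH branch above and removes the port
 * from every DSA and EDSA entry.
 */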
/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* Such an entry does not exist */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}
/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
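/* Worked example (illustrative; assumes MVPP2_CLS_LKP_INDEX_WAY_OFFS is
 * 6, which is not shown in this excerpt): writing the entry for
 * lkpid = 5, way = 1 would program the index register with
 *
 *	val = (1 << 6) | 5 = 0x45
 *
 * before le->data is stored to MVPP2_CLS_LKP_TBL_REG.
 */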
/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}
static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}
/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	int size_bytes;
	u32 val;

	size_bytes = sizeof(u32) * size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    bm_pool->phys_addr);
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;
	atomic_set(&bm_pool->in_use, 0);

	return 0;
}
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		u32 vaddr;

		/* Get buffer virtual address (indirect access) */
		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
		vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
		if (!vaddr)
			break;
		dev_kfree_skb_any((struct sk_buff *)vaddr);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(priv, bm_pool);
	if (bm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
			  bm_pool->virt_addr,
			  bm_pool->phys_addr);
	return 0;
}
static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}
static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
	val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
		MVPP2_RXQ_POOL_LONG_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
	val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
		MVPP2_RXQ_POOL_SHORT_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Allocate skb for BM pool */
static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
				       struct mvpp2_bm_pool *bm_pool,
				       dma_addr_t *buf_phys_addr,
				       gfp_t gfp_mask)
{
	struct sk_buff *skb;
	dma_addr_t phys_addr;

	skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
	if (!skb)
		return NULL;

	phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
				   MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	*buf_phys_addr = phys_addr;

	return skb;
}
/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
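/* Worked example (illustrative; assumes MVPP2_BM_COOKIE_POOL_OFFS is 8,
 * which is not shown in this excerpt): packing pool 3 into an empty
 * cookie and reading it back:
 *
 *	u32 bm = mvpp2_bm_cookie_pool_set(0, 3);	// bm == 0x300
 *	int pool = mvpp2_bm_cookie_pool_get(bm);	// pool == 3
 */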
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     u32 buf_phys_addr, u32 buf_virt_addr)
{
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
}

/* Release multicast buffer */
static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
				 u32 buf_phys_addr, u32 buf_virt_addr,
				 int mc_id)
{
	u32 val = 0;

	val |= (mc_id & MVPP2_BM_MC_ID_MASK);
	mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);

	mvpp2_bm_pool_put(port, pool,
			  buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
			  buf_virt_addr);
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      u32 phys_addr, u32 cookie)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
}
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct sk_buff *skb;
	int i, buf_size, total_size;
	u32 bm;
	dma_addr_t phys_addr;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
	for (i = 0; i < buf_num; i++) {
		skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
		if (!skb)
			break;

		mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;
	bm_pool->in_use_thresh = bm_pool->buf_num / 4;

	netdev_dbg(port->dev,
		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "%s pool %d: %d of %d buffers added\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, i, buf_num);
	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
		       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					 MVPP2_BM_SWF_LONG,
					 port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
					  MVPP2_BM_SWF_SHORT,
					  MVPP2_BM_SHORT_PKT_SIZE);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}

	port_pool->pkt_size = pkt_size;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
	dev->mtu = mtu;
	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}

/* Mask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
		    (MVPP2_CAUSE_MISC_SUM_MASK |
		     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
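/* Usage sketch (an assumption based on the per-CPU wording above, not a
 * call site copied from this excerpt): since each CPU must program its
 * own copy of the Rx/Tx mask register, these callbacks are meant to be
 * run on every CPU, e.g. via
 *
 *	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 *	...
 *	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
 */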
/* Port configuration routines */

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fall through */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}
static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
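/* Worked example (illustrative; assumes MVPP2_MH_SIZE is 2 and takes
 * port->pkt_size = 1632 purely as a sample value): the size field
 * written above would be
 *
 *	(1632 - 2) / 2 = 815
 *
 * i.e. the GMAC maximum receive size is programmed in 2-byte units,
 * with the Marvell header excluded.
 */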
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	/* Configure port to loopback if needed */
	if (port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port);

	/* Update TX FIFO MIN Threshold */
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	/* Min. TX threshold must be less than minimal packet length */
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* By default, mask all interrupts on all present CPUs */
	mvpp2_interrupts_disable(port);
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}
/* Disable transmit via physical egress queue
 * - HW stops taking descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check the port TX Command register to verify that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
{
	int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	int cpu = smp_processor_id();

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}
/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}
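/* Minimal usage sketch (a hypothetical caller, not copied from this
 * excerpt): a Tx path would bail out when the aggregated ring is still
 * full after the occupancy refresh performed above, e.g.
 *
 *	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags))
 *		return NETDEV_TX_BUSY;	// or drop the frame
 */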
/* Reserved Tx descriptors allocation request */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	   (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
  3493. /* Set Tx descriptors fields relevant for CSUM calculation */
  3494. static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
  3495. int ip_hdr_len, int l4_proto)
  3496. {
  3497. u32 command;
  3498. /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
  3499. * G_L4_chk, L4_type required only for checksum calculation
  3500. */
  3501. command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
  3502. command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
  3503. command |= MVPP2_TXD_IP_CSUM_DISABLE;
  3504. if (l3_proto == swab16(ETH_P_IP)) {
  3505. command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
  3506. command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
  3507. } else {
  3508. command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
  3509. }
  3510. if (l4_proto == IPPROTO_TCP) {
  3511. command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
  3512. command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
  3513. } else if (l4_proto == IPPROTO_UDP) {
  3514. command |= MVPP2_TXD_L4_UDP; /* enable UDP */
  3515. command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
  3516. } else {
  3517. command |= MVPP2_TXD_L4_CSUM_NOT;
  3518. }
  3519. return command;
  3520. }
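
/* Illustrative sketch (not part of the driver): how the helper above is
 * typically fed for a TCP-over-IPv4 frame with a plain 14-byte Ethernet
 * header and a 20-byte IPv4 header (ihl = 5 32-bit words). The values
 * below are assumptions chosen for the example only; the real caller is
 * mvpp2_skb_tx_csum(), which derives them from the skb.
 */
static u32 __maybe_unused mvpp2_txq_desc_csum_example(void)
{
	int l3_offs = ETH_HLEN;	/* L3 header starts after Ethernet header */
	int ip_hdr_len = 5;	/* IPv4 header length, in 32-bit words */

	/* l3_proto must match the swab16() comparison in the helper */
	return mvpp2_txq_desc_csum(l3_offs, swab16(ETH_P_IP),
				   ip_hdr_len, IPPROTO_TCP);
}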

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Set the number of packets that will be received before an Rx interrupt
 * is generated by the HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 pkts)
{
	u32 val;

	val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);

	rxq->pkts_coal = pkts;
}

/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 usec)
{
	u32 val;

	val = (port->priv->tclk / USEC_PER_SEC) * usec;
	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);

	rxq->time_coal = usec;
}
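
/* Worked example (the clock value is an assumption for illustration):
 * the threshold is expressed in tclk cycles. With tclk = 250 MHz,
 * tclk / USEC_PER_SEC = 250 cycles per microsecond, so requesting
 * usec = 32 programs 250 * 32 = 8000 cycles into the ISR threshold
 * register of this Rx queue.
 */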

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		dma_addr_t buf_phys_addr =
				    txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
		struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];

		mvpp2_txq_inc_get(txq_pcpu);

		if (!skb)
			continue;

		dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
				 skb_headlen(skb), DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong CPU at the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
}
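
/* Note on the wake threshold above: a single skb can consume up to
 * MAX_SKB_FRAGS fragment descriptors plus one descriptor for its linear
 * part, so the queue is only woken once at least that many descriptors
 * are free again; the next xmit then cannot fail for lack of space.
 * mvpp2_tx() stops the queue using the same threshold.
 */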

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = this_cpu_ptr(txq->pcpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
				desc_num * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_phys, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address */
	/* indirect access */
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
		    aggr_txq->descs_phys);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(rx_desc);

		mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
				  rx_desc->buf_cookie);
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
					     MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be contiguous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be contiguous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
					   sizeof(*txq_pcpu->tx_skb),
					   GFP_KERNEL);
		if (!txq_pcpu->tx_skb)
			goto error;

		txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
					     sizeof(dma_addr_t), GFP_KERNEL);
		if (!txq_pcpu->tx_buffs)
			goto error;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}

	return 0;

error:
	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->tx_skb);
		kfree(txq_pcpu->tx_buffs);
	}

	dma_free_coherent(port->dev->dev.parent,
			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
			  txq->descs, txq->descs_phys);

	return -ENOMEM;
}

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->tx_skb);
		kfree(txq_pcpu->tx_buffs);
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}

/* Clean up (drain) a single Tx queue */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all Tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;

	mvpp2_interrupts_disable(port);

	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

/* Adjust link */
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			u32 val;

			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
		phy_print_status(phydev);
	}
}

static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << txq_number) - 1;
	tx_todo = mvpp2_tx_done(port, cause);

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		    (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
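
/* Illustrative restatement (not part of the driver) of the checksum
 * predicate above as a standalone helper: the hardware-computed L4
 * checksum is trusted only for an error-free IPv4 header or any IPv6
 * packet, carrying TCP or UDP, with the L4-checksum-OK bit set.
 */
static bool __maybe_unused mvpp2_rx_csum_ok_example(u32 status)
{
	bool l3_ok = ((status & MVPP2_RXD_L3_IP4) &&
		      !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
		     (status & MVPP2_RXD_L3_IP6);
	bool l4_ok = ((status & MVPP2_RXD_L4_UDP) ||
		      (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK);

	return l3_ok && l4_ok;
}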

/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, int is_recycle)
{
	struct sk_buff *skb;
	dma_addr_t phys_addr;

	if (is_recycle &&
	    (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
		return 0;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
	atomic_dec(&bm_pool->in_use);
	return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}

static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
			      struct mvpp2_rx_desc *rx_desc)
{
	struct mvpp2_buff_hdr *buff_hdr;
	struct sk_buff *skb;
	u32 rx_status = rx_desc->status;
	u32 buff_phys_addr;
	u32 buff_virt_addr;
	u32 buff_phys_addr_next;
	u32 buff_virt_addr_next;
	int mc_id;
	int pool_id;

	pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	buff_phys_addr = rx_desc->buf_phys_addr;
	buff_virt_addr = rx_desc->buf_cookie;

	do {
		skb = (struct sk_buff *)buff_virt_addr;
		buff_hdr = (struct mvpp2_buff_hdr *)skb->head;

		mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);

		buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
		buff_virt_addr_next = buff_hdr->next_buff_virt_addr;

		/* Release buffer */
		mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
				     buff_virt_addr, mc_id);

		buff_phys_addr = buff_phys_addr_next;
		buff_virt_addr = buff_virt_addr_next;

	} while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
}

/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
		    struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received, rx_filled, i;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	rx_filled = 0;
	for (i = 0; i < rx_todo; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		u32 bm, rx_status;
		int pool, rx_bytes, err;

		rx_filled++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;

		bm = mvpp2_bm_cookie_build(rx_desc);
		pool = mvpp2_bm_cookie_pool_get(bm);
		bm_pool = &port->priv->bm_pools[pool];
		/* Check if buffer header is used */
		if (rx_status & MVPP2_RXD_BUF_HDR) {
			mvpp2_buff_hdr_rx(port, rx_desc);
			continue;
		}

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * carried in the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
					  rx_desc->buf_cookie);
			continue;
		}

		skb = (struct sk_buff *)rx_desc->buf_cookie;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;
		atomic_inc(&bm_pool->in_use);

		skb_reserve(skb, MVPP2_MH_SIZE);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(&port->napi, skb);

		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			rx_filled--;
		}
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);

	return rx_todo;
}
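
/* Note on the wmb() above: the write barrier ensures all prior buffer
 * and descriptor updates are visible to the device before the
 * occupied/non-occupied counters are advanced, so the hardware never
 * sees a refilled slot whose buffer pointer write is still in flight.
 */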

static inline void
tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	dma_unmap_single(dev, desc->buf_phys_addr,
			 desc->data_size, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_phys_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		tx_desc->phys_txq = txq->id;
		tx_desc->data_size = frag->size;

		buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
					       tx_desc->data_size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
			mvpp2_txq_desc_put(txq);
			goto error;
		}

		tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
		tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVPP2_TXD_L_DESC;
			mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_phys_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	tx_desc->phys_txq = txq->id;
	tx_desc->data_size = skb_headlen(skb);

	buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
				       tx_desc->data_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}
	tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
	tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		tx_desc->command = tx_cmd;
		mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
			frags = 0;
			goto out;
		}
	}

	txq_pcpu->reserved_num -= frags;
	txq_pcpu->count += frags;
	aggr_txq->count += frags;

	/* Enable transmit */
	wmb();
	mvpp2_aggr_txq_pend_desc_add(port, frags);

	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netif_tx_stop_queue(nq);
	}
out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}

static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_read(port->priv,
				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;

	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	/* Process RX packets */
	cause_rx |= port->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete(napi);

		mvpp2_interrupts_enable(port);
	}
	port->pending_cause_rx = cause_rx;
	return rx_done;
}
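
/* Illustrative cause-register decoding (assumed value, for the example
 * only): reading cause_rx_tx = 0x00010005 would mean Rx queues 0 and 2
 * have packets pending (bits 0 and 2) and Tx queue 0 has transmitted
 * packets (bit 16). After masking with
 * MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK, the loop above services Rx
 * queue 2 first, since mvpp2_get_rx_queue() picks the highest set bit
 * via fls().
 */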

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	mvpp2_gmac_max_rx_size_set(port);
	mvpp2_txp_max_tx_size_set(port);

	napi_enable(&port->napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	mvpp2_port_enable(port);
	phy_start(port->phy_dev);
	netif_tx_start_all_queues(port->dev);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	napi_disable(&port->napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	phy_stop(port->phy_dev);
}

/* Return a valid (possibly adjusted) MTU, or a negative error code */
static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}
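
/* Worked example (numbers assumed for illustration): a request for
 * mtu = 10000 is first clamped to 9676; a request for mtu = 60 is
 * rejected with -EINVAL; an in-range request whose resulting
 * MVPP2_RX_PKT_SIZE() is not a multiple of 8 is rounded up via ALIGN()
 * so the Rx buffer size stays 8-byte aligned.
 */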

static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
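
/* Worked example: a request of rx_pending = 1000 is rounded up to
 * ALIGN(1000, 16) = 1008 and tx_pending = 1000 up to
 * ALIGN(1000, 32) = 1024; Rx rings must be a multiple of 16 descriptors
 * and Tx rings a multiple of 32.
 */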

static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
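
/* Illustrative byte mapping (register value assumed for the example):
 * with mac_addr_h = 0x00505443, the first four MAC bytes become
 * 00:50:54:43; byte 4 comes from the low byte of the middle register
 * and byte 5 from the GMAC control register, shifted down by
 * MVPP2_GMAC_SA_LOW_OFFS.
 */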
  4523. static int mvpp2_phy_connect(struct mvpp2_port *port)
  4524. {
  4525. struct phy_device *phy_dev;
  4526. phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
  4527. port->phy_interface);
  4528. if (!phy_dev) {
  4529. netdev_err(port->dev, "cannot connect to phy\n");
  4530. return -ENODEV;
  4531. }
  4532. phy_dev->supported &= PHY_GBIT_FEATURES;
  4533. phy_dev->advertising = phy_dev->supported;
  4534. port->phy_dev = phy_dev;
  4535. port->link = 0;
  4536. port->duplex = 0;
  4537. port->speed = 0;
  4538. return 0;
  4539. }
  4540. static void mvpp2_phy_disconnect(struct mvpp2_port *port)
  4541. {
  4542. phy_disconnect(port->phy_dev);
  4543. port->phy_dev = NULL;
  4544. }
  4545. static int mvpp2_open(struct net_device *dev)
  4546. {
  4547. struct mvpp2_port *port = netdev_priv(dev);
  4548. unsigned char mac_bcast[ETH_ALEN] = {
  4549. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
  4550. int err;
  4551. err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
  4552. if (err) {
  4553. netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
  4554. return err;
  4555. }
  4556. err = mvpp2_prs_mac_da_accept(port->priv, port->id,
  4557. dev->dev_addr, true);
  4558. if (err) {
  4559. netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
  4560. return err;
  4561. }
  4562. err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
  4563. if (err) {
  4564. netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
  4565. return err;
  4566. }
  4567. err = mvpp2_prs_def_flow(port);
  4568. if (err) {
  4569. netdev_err(dev, "mvpp2_prs_def_flow failed\n");
  4570. return err;
  4571. }
  4572. /* Allocate the Rx/Tx queues */
  4573. err = mvpp2_setup_rxqs(port);
  4574. if (err) {
  4575. netdev_err(port->dev, "cannot allocate Rx queues\n");
  4576. return err;
  4577. }
  4578. err = mvpp2_setup_txqs(port);
  4579. if (err) {
  4580. netdev_err(port->dev, "cannot allocate Tx queues\n");
  4581. goto err_cleanup_rxqs;
  4582. }
  4583. err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
  4584. if (err) {
  4585. netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
  4586. goto err_cleanup_txqs;
  4587. }
  4588. /* In default link is down */
  4589. netif_carrier_off(port->dev);
  4590. err = mvpp2_phy_connect(port);
  4591. if (err < 0)
  4592. goto err_free_irq;
  4593. /* Unmask interrupts on all CPUs */
  4594. on_each_cpu(mvpp2_interrupts_unmask, port, 1);
  4595. mvpp2_start_dev(port);
  4596. return 0;
  4597. err_free_irq:
  4598. free_irq(port->irq, port);
  4599. err_cleanup_txqs:
  4600. mvpp2_cleanup_txqs(port);
  4601. err_cleanup_rxqs:
  4602. mvpp2_cleanup_rxqs(port);
  4603. return err;
  4604. }
  4605. static int mvpp2_stop(struct net_device *dev)
  4606. {
  4607. struct mvpp2_port *port = netdev_priv(dev);
  4608. struct mvpp2_port_pcpu *port_pcpu;
  4609. int cpu;
  4610. mvpp2_stop_dev(port);
  4611. mvpp2_phy_disconnect(port);
  4612. /* Mask interrupts on all CPUs */
  4613. on_each_cpu(mvpp2_interrupts_mask, port, 1);
  4614. free_irq(port->irq, port);
  4615. for_each_present_cpu(cpu) {
  4616. port_pcpu = per_cpu_ptr(port->pcpu, cpu);
  4617. hrtimer_cancel(&port_pcpu->tx_done_timer);
  4618. port_pcpu->timer_scheduled = false;
  4619. tasklet_kill(&port_pcpu->tx_done_tasklet);
  4620. }
  4621. mvpp2_cleanup_rxqs(port);
  4622. mvpp2_cleanup_txqs(port);
  4623. return 0;
  4624. }
  4625. static void mvpp2_set_rx_mode(struct net_device *dev)
  4626. {
  4627. struct mvpp2_port *port = netdev_priv(dev);
  4628. struct mvpp2 *priv = port->priv;
  4629. struct netdev_hw_addr *ha;
  4630. int id = port->id;
  4631. bool allmulti = dev->flags & IFF_ALLMULTI;
  4632. mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
  4633. mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
  4634. mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
  4635. /* Remove all port->id's mcast enries */
  4636. mvpp2_prs_mcast_del_all(priv, id);
  4637. if (allmulti && !netdev_mc_empty(dev)) {
  4638. netdev_for_each_mc_addr(ha, dev)
  4639. mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
  4640. }
  4641. }
  4642. static int mvpp2_set_mac_address(struct net_device *dev, void *p)
  4643. {
  4644. struct mvpp2_port *port = netdev_priv(dev);
  4645. const struct sockaddr *addr = p;
  4646. int err;
  4647. if (!is_valid_ether_addr(addr->sa_data)) {
  4648. err = -EADDRNOTAVAIL;
  4649. goto error;
  4650. }
  4651. if (!netif_running(dev)) {
  4652. err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
  4653. if (!err)
  4654. return 0;
  4655. /* Reconfigure parser to accept the original MAC address */
  4656. err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
  4657. if (err)
  4658. goto error;
  4659. }
  4660. mvpp2_stop_dev(port);
  4661. err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
  4662. if (!err)
  4663. goto out_start;
  4664. /* Reconfigure parser accept the original MAC address */
  4665. err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
  4666. if (err)
  4667. goto error;
  4668. out_start:
  4669. mvpp2_start_dev(port);
  4670. mvpp2_egress_enable(port);
  4671. mvpp2_ingress_enable(port);
  4672. return 0;
  4673. error:
  4674. netdev_err(dev, "fail to change MAC address\n");
  4675. return err;
  4676. }
  4677. static int mvpp2_change_mtu(struct net_device *dev, int mtu)
  4678. {
  4679. struct mvpp2_port *port = netdev_priv(dev);
  4680. int err;
  4681. mtu = mvpp2_check_mtu_valid(dev, mtu);
  4682. if (mtu < 0) {
  4683. err = mtu;
  4684. goto error;
  4685. }
  4686. if (!netif_running(dev)) {
  4687. err = mvpp2_bm_update_mtu(dev, mtu);
  4688. if (!err) {
  4689. port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
  4690. return 0;
  4691. }
  4692. /* Reconfigure BM to the original MTU */
  4693. err = mvpp2_bm_update_mtu(dev, dev->mtu);
  4694. if (err)
  4695. goto error;
  4696. }
  4697. mvpp2_stop_dev(port);
  4698. err = mvpp2_bm_update_mtu(dev, mtu);
  4699. if (!err) {
  4700. port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
  4701. goto out_start;
  4702. }
  4703. /* Reconfigure BM to the original MTU */
  4704. err = mvpp2_bm_update_mtu(dev, dev->mtu);
  4705. if (err)
  4706. goto error;
  4707. out_start:
  4708. mvpp2_start_dev(port);
  4709. mvpp2_egress_enable(port);
  4710. mvpp2_ingress_enable(port);
  4711. return 0;
  4712. error:
  4713. netdev_err(dev, "fail to change MTU\n");
  4714. return err;
  4715. }
  4716. static struct rtnl_link_stats64 *
  4717. mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
  4718. {
  4719. struct mvpp2_port *port = netdev_priv(dev);
  4720. unsigned int start;
  4721. int cpu;
  4722. for_each_possible_cpu(cpu) {
  4723. struct mvpp2_pcpu_stats *cpu_stats;
  4724. u64 rx_packets;
  4725. u64 rx_bytes;
  4726. u64 tx_packets;
  4727. u64 tx_bytes;
  4728. cpu_stats = per_cpu_ptr(port->stats, cpu);
  4729. do {
  4730. start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
  4731. rx_packets = cpu_stats->rx_packets;
  4732. rx_bytes = cpu_stats->rx_bytes;
  4733. tx_packets = cpu_stats->tx_packets;
  4734. tx_bytes = cpu_stats->tx_bytes;
  4735. } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
  4736. stats->rx_packets += rx_packets;
  4737. stats->rx_bytes += rx_bytes;
  4738. stats->tx_packets += tx_packets;
  4739. stats->tx_bytes += tx_bytes;
  4740. }
  4741. stats->rx_errors = dev->stats.rx_errors;
  4742. stats->rx_dropped = dev->stats.rx_dropped;
  4743. stats->tx_dropped = dev->stats.tx_dropped;
  4744. return stats;
  4745. }
  4746. static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  4747. {
  4748. struct mvpp2_port *port = netdev_priv(dev);
  4749. int ret;
  4750. if (!port->phy_dev)
  4751. return -ENOTSUPP;
  4752. ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
  4753. if (!ret)
  4754. mvpp2_link_event(dev);
  4755. return ret;
  4756. }
  4757. /* Ethtool methods */
  4758. /* Get settings (phy address, speed) for ethtools */
  4759. static int mvpp2_ethtool_get_settings(struct net_device *dev,
  4760. struct ethtool_cmd *cmd)
  4761. {
  4762. struct mvpp2_port *port = netdev_priv(dev);
  4763. if (!port->phy_dev)
  4764. return -ENODEV;
  4765. return phy_ethtool_gset(port->phy_dev, cmd);
  4766. }
  4767. /* Set settings (phy address, speed) for ethtools */
  4768. static int mvpp2_ethtool_set_settings(struct net_device *dev,
  4769. struct ethtool_cmd *cmd)
  4770. {
  4771. struct mvpp2_port *port = netdev_priv(dev);
  4772. if (!port->phy_dev)
  4773. return -ENODEV;
  4774. return phy_ethtool_sset(port->phy_dev, cmd);
  4775. }
  4776. /* Set interrupt coalescing for ethtools */
  4777. static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
  4778. struct ethtool_coalesce *c)
  4779. {
  4780. struct mvpp2_port *port = netdev_priv(dev);
  4781. int queue;
  4782. for (queue = 0; queue < rxq_number; queue++) {
  4783. struct mvpp2_rx_queue *rxq = port->rxqs[queue];
  4784. rxq->time_coal = c->rx_coalesce_usecs;
  4785. rxq->pkts_coal = c->rx_max_coalesced_frames;
  4786. mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
  4787. mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
  4788. }
  4789. for (queue = 0; queue < txq_number; queue++) {
  4790. struct mvpp2_tx_queue *txq = port->txqs[queue];
  4791. txq->done_pkts_coal = c->tx_max_coalesced_frames;
  4792. }
  4793. return 0;
  4794. }
  4795. /* get coalescing for ethtools */
  4796. static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
  4797. struct ethtool_coalesce *c)
  4798. {
  4799. struct mvpp2_port *port = netdev_priv(dev);
  4800. c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
  4801. c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
  4802. c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
  4803. return 0;
  4804. }
  4805. static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
  4806. struct ethtool_drvinfo *drvinfo)
  4807. {
  4808. strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
  4809. sizeof(drvinfo->driver));
  4810. strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
  4811. sizeof(drvinfo->version));
  4812. strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
  4813. sizeof(drvinfo->bus_info));
  4814. }
  4815. static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
  4816. struct ethtool_ringparam *ring)
  4817. {
  4818. struct mvpp2_port *port = netdev_priv(dev);
  4819. ring->rx_max_pending = MVPP2_MAX_RXD;
  4820. ring->tx_max_pending = MVPP2_MAX_TXD;
  4821. ring->rx_pending = port->rx_ring_size;
  4822. ring->tx_pending = port->tx_ring_size;
  4823. }
  4824. static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
  4825. struct ethtool_ringparam *ring)
  4826. {
  4827. struct mvpp2_port *port = netdev_priv(dev);
  4828. u16 prev_rx_ring_size = port->rx_ring_size;
  4829. u16 prev_tx_ring_size = port->tx_ring_size;
  4830. int err;
  4831. err = mvpp2_check_ringparam_valid(dev, ring);
  4832. if (err)
  4833. return err;
  4834. if (!netif_running(dev)) {
  4835. port->rx_ring_size = ring->rx_pending;
  4836. port->tx_ring_size = ring->tx_pending;
  4837. return 0;
  4838. }
  4839. /* The interface is running, so we have to force a
  4840. * reallocation of the queues
  4841. */
  4842. mvpp2_stop_dev(port);
  4843. mvpp2_cleanup_rxqs(port);
  4844. mvpp2_cleanup_txqs(port);
  4845. port->rx_ring_size = ring->rx_pending;
  4846. port->tx_ring_size = ring->tx_pending;
  4847. err = mvpp2_setup_rxqs(port);
  4848. if (err) {
  4849. /* Reallocate Rx queues with the original ring size */
  4850. port->rx_ring_size = prev_rx_ring_size;
  4851. ring->rx_pending = prev_rx_ring_size;
  4852. err = mvpp2_setup_rxqs(port);
  4853. if (err)
  4854. goto err_out;
  4855. }
  4856. err = mvpp2_setup_txqs(port);
  4857. if (err) {
  4858. /* Reallocate Tx queues with the original ring size */
  4859. port->tx_ring_size = prev_tx_ring_size;
  4860. ring->tx_pending = prev_tx_ring_size;
  4861. err = mvpp2_setup_txqs(port);
  4862. if (err)
  4863. goto err_clean_rxqs;
  4864. }
  4865. mvpp2_start_dev(port);
  4866. mvpp2_egress_enable(port);
  4867. mvpp2_ingress_enable(port);
  4868. return 0;
  4869. err_clean_rxqs:
  4870. mvpp2_cleanup_rxqs(port);
  4871. err_out:
  4872. netdev_err(dev, "fail to change ring parameters");
  4873. return err;
  4874. }
  4875. /* Device ops */
  4876. static const struct net_device_ops mvpp2_netdev_ops = {
  4877. .ndo_open = mvpp2_open,
  4878. .ndo_stop = mvpp2_stop,
  4879. .ndo_start_xmit = mvpp2_tx,
  4880. .ndo_set_rx_mode = mvpp2_set_rx_mode,
  4881. .ndo_set_mac_address = mvpp2_set_mac_address,
  4882. .ndo_change_mtu = mvpp2_change_mtu,
  4883. .ndo_get_stats64 = mvpp2_get_stats64,
  4884. .ndo_do_ioctl = mvpp2_ioctl,
  4885. };
  4886. static const struct ethtool_ops mvpp2_eth_tool_ops = {
  4887. .get_link = ethtool_op_get_link,
  4888. .get_settings = mvpp2_ethtool_get_settings,
  4889. .set_settings = mvpp2_ethtool_set_settings,
  4890. .set_coalesce = mvpp2_ethtool_set_coalesce,
  4891. .get_coalesce = mvpp2_ethtool_get_coalesce,
  4892. .get_drvinfo = mvpp2_ethtool_get_drvinfo,
  4893. .get_ringparam = mvpp2_ethtool_get_ringparam,
  4894. .set_ringparam = mvpp2_ethtool_set_ringparam,
  4895. };
  4896. /* Driver initialization */
  4897. static void mvpp2_port_power_up(struct mvpp2_port *port)
  4898. {
  4899. mvpp2_port_mii_set(port);
  4900. mvpp2_port_periodic_xon_disable(port);
  4901. mvpp2_port_fc_adv_enable(port);
  4902. mvpp2_port_reset(port);
  4903. }
  4904. /* Initialize port HW */
  4905. static int mvpp2_port_init(struct mvpp2_port *port)
  4906. {
  4907. struct device *dev = port->dev->dev.parent;
  4908. struct mvpp2 *priv = port->priv;
  4909. struct mvpp2_txq_pcpu *txq_pcpu;
  4910. int queue, cpu, err;
  4911. if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
  4912. return -EINVAL;
  4913. /* Disable port */
  4914. mvpp2_egress_disable(port);
  4915. mvpp2_port_disable(port);
  4916. port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
  4917. GFP_KERNEL);
  4918. if (!port->txqs)
  4919. return -ENOMEM;
  4920. /* Associate physical Tx queues to this port and initialize.
  4921. * The mapping is predefined.
  4922. */
  4923. for (queue = 0; queue < txq_number; queue++) {
  4924. int queue_phy_id = mvpp2_txq_phys(port->id, queue);
  4925. struct mvpp2_tx_queue *txq;
  4926. txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
  4927. if (!txq)
  4928. return -ENOMEM;
  4929. txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
  4930. if (!txq->pcpu) {
  4931. err = -ENOMEM;
  4932. goto err_free_percpu;
  4933. }
  4934. txq->id = queue_phy_id;
  4935. txq->log_id = queue;
  4936. txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
  4937. for_each_present_cpu(cpu) {
  4938. txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
  4939. txq_pcpu->cpu = cpu;
  4940. }
  4941. port->txqs[queue] = txq;
  4942. }
  4943. port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
  4944. GFP_KERNEL);
  4945. if (!port->rxqs) {
  4946. err = -ENOMEM;
  4947. goto err_free_percpu;
  4948. }
  4949. /* Allocate and initialize Rx queue for this port */
  4950. for (queue = 0; queue < rxq_number; queue++) {
  4951. struct mvpp2_rx_queue *rxq;
  4952. /* Map physical Rx queue to port's logical Rx queue */
  4953. rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
  4954. if (!rxq)
  4955. goto err_free_percpu;
  4956. /* Map this Rx queue to a physical queue */
  4957. rxq->id = port->first_rxq + queue;
  4958. rxq->port = port->id;
  4959. rxq->logic_rxq = queue;
  4960. port->rxqs[queue] = rxq;
  4961. }
  4962. /* Configure Rx queue group interrupt for this port */
  4963. mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
  4964. /* Create Rx descriptor rings */
  4965. for (queue = 0; queue < rxq_number; queue++) {
  4966. struct mvpp2_rx_queue *rxq = port->rxqs[queue];
  4967. rxq->size = port->rx_ring_size;
  4968. rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
  4969. rxq->time_coal = MVPP2_RX_COAL_USEC;
  4970. }
  4971. mvpp2_ingress_disable(port);
  4972. /* Port default configuration */
  4973. mvpp2_defaults_set(port);
  4974. /* Port's classifier configuration */
  4975. mvpp2_cls_oversize_rxq_set(port);
  4976. mvpp2_cls_port_config(port);
  4977. /* Provide an initial Rx packet size */
  4978. port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
  4979. /* Initialize pools for swf */
  4980. err = mvpp2_swf_bm_pool_init(port);
  4981. if (err)
  4982. goto err_free_percpu;
  4983. return 0;
  4984. err_free_percpu:
  4985. for (queue = 0; queue < txq_number; queue++) {
  4986. if (!port->txqs[queue])
  4987. continue;
  4988. free_percpu(port->txqs[queue]->pcpu);
  4989. }
  4990. return err;
  4991. }
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv,
			    int *next_first_rxq)
{
	struct device_node *phy_node;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct net_device *dev;
	struct resource *res;
	const char *dt_mac_addr;
	const char *mac_from;
	char hw_mac_addr[ETH_ALEN];
	u32 id;
	int features;
	int phy_mode;
	int priv_common_regs_num = 2;
	int err, i, cpu;
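
	/* One multiqueue net_device per port, with mvpp2_port as its
	 * private area and txq_number/rxq_number hardware queues.
	 */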
	dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
				 rxq_number);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "missing phy\n");
		err = -ENODEV;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(port_node);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (of_property_read_u32(port_node, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);

	port->irq = irq_of_parse_and_map(port_node, 0);
	if (port->irq <= 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	if (of_property_read_bool(port_node, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->priv = priv;
	port->id = id;
	port->first_rxq = *next_first_rxq;
	port->phy_node = phy_node;
	port->phy_interface = phy_mode;

	res = platform_get_resource(pdev, IORESOURCE_MEM,
				    priv_common_regs_num + id);
	port->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(port->base)) {
		err = PTR_ERR(port->base);
		goto err_free_irq;
	}

	/* Alloc per-cpu stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	dt_mac_addr = of_get_mac_address(port_node);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		mvpp2_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;
	port->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}
	mvpp2_port_power_up(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}
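
	/* Tx completion is deferred rather than interrupt driven: each
	 * CPU arms a pinned hrtimer whose callback schedules a tasklet
	 * that reclaims transmitted descriptors.
	 */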
	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_PINNED);
		port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
		port_pcpu->timer_scheduled = false;

		tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
			     (unsigned long)dev);
	}

	netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
	features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	/* Increment the first Rx queue number to be used by the next port */
	*next_first_rxq += rxq_number;
	priv->port_list[id] = port;
	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	irq_dispose_mapping(port->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
	irq_dispose_mapping(port->irq);
	free_netdev(port->dev);
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;
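
	/* The controller reaches DRAM through MBUS address decoding
	 * windows: disable all six of them first (only the first four
	 * have remap registers), then open one window per DRAM
	 * chip-select and record its bit in the enable mask.
	 */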
	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;
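
	/* Every port gets the same data and attribute FIFO quota; the
	 * final write to MVPP2_RX_FIFO_INIT_REG kicks off the FIFO
	 * initialization.
	 */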
	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints */
	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	/* Disable HW PHY polling */
	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;
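
	/* One aggregated TXQ per present CPU, so each CPU can post Tx
	 * descriptors without taking a lock.
	 */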
	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);

	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	int port_count, first_rxq;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
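
	/* The first two MEM resources are the shared packet-processor
	 * and LMS register windows; per-port windows follow them (hence
	 * priv_common_regs_num == 2 in mvpp2_port_probe()).
	 */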
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->lms_base))
		return PTR_ERR(priv->lms_base);

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_gop_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_gop_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(struct mvpp2_port *),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_gop_clk;
	}

	/* Initialize ports */
	first_rxq = 0;
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
		if (err < 0)
			goto err_gop_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;
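
	/* devm-managed allocations and mappings go away by themselves;
	 * only per-CPU data, coherent DMA areas and clocks need explicit
	 * teardown here.
	 */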
	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_phys);
	}

	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{ .compatible = "marvell,armada-375-pp2" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");