/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION "\n";

struct mlx5_ib_event_work {
	struct work_struct work;
	struct mlx5_core_dev *dev;
	void *context;
	enum mlx5_dev_event event;
	unsigned long param;
};

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);

/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
/* We can't use an array for xlt_emergency_page because dma_map_single
 * doesn't work on kernel module memory
 */
static unsigned long xlt_emergency_page;
static struct mutex xlt_emergency_page_mutex;
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
	struct mlx5_ib_dev *dev;

	mutex_lock(&mlx5_ib_multiport_mutex);
	dev = mpi->ibdev;
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return dev;
}

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
			  u8 port_num,
			  enum ib_port_state *state)
{
	struct ib_port_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	ret = ibdev->query_port(ibdev, port_num, &attr);
	if (!ret)
		*state = attr.state;
	return ret;
}
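
/*
 * Netdev notifier callback for RoCE ports: on REGISTER/UNREGISTER it tracks
 * which net_device backs the port (the representor netdev in switchdev mode,
 * otherwise the PCI function's netdev), and on CHANGE/UP/DOWN it translates
 * carrier changes into IB_EVENT_PORT_ACTIVE / IB_EVENT_PORT_ERR, but only
 * when the cached port state actually changed.
 */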
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port_num = roce->native_port_num;
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *ibdev;

	ibdev = roce->dev;
	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
		write_lock(&roce->netdev_lock);
		if (ibdev->rep) {
			struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch;
			struct net_device *rep_ndev;

			rep_ndev = mlx5_ib_get_rep_netdev(esw,
							  ibdev->rep->vport);
			if (rep_ndev == ndev)
				roce->netdev = (event == NETDEV_UNREGISTER) ?
					NULL : ndev;
		} else if (ndev->dev.parent == &mdev->pdev->dev) {
			roce->netdev = (event == NETDEV_UNREGISTER) ?
				NULL : ndev;
		}
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if ((upper == ndev || (!upper && ndev == roce->netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = { };
			enum ib_port_state port_state;

			if (get_port_state(&ibdev->ib_dev, port_num,
					   &port_state))
				goto done;

			if (roce->last_port_state == port_state)
				goto done;

			roce->last_port_state = port_state;
			ibev.device = &ibdev->ib_dev;
			if (port_state == IB_PORT_DOWN)
				ibev.event = IB_EVENT_PORT_ERR;
			else if (port_state == IB_PORT_ACTIVE)
				ibev.event = IB_EVENT_PORT_ACTIVE;
			else
				goto done;

			ibev.element.port_num = port_num;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}
done:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return NOTIFY_DONE;
}
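
/*
 * Return the net_device currently associated with an IB port, preferring the
 * LAG netdev when RoCE LAG is active. The returned device is held; the
 * caller is responsible for dev_put().
 */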
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;
	struct mlx5_core_dev *mdev;

	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NULL;

	ndev = mlx5_lag_get_roce_netdev(mdev);
	if (ndev)
		goto out;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->roce[port_num - 1].netdev_lock);
	ndev = ibdev->roce[port_num - 1].netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->roce[port_num - 1].netdev_lock);

out:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return ndev;
}
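
/*
 * Map an IB port number to the mlx5_core_dev that currently owns it. On
 * single-port or InfiniBand devices this is simply ibdev->mdev; on multiport
 * Ethernet devices the affiliated port's mdev is returned and, unless it is
 * the master, a reference is taken that must be released with
 * mlx5_ib_put_native_port_mdev().
 */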
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
						   u8 ib_port_num,
						   u8 *native_port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  ib_port_num);
	struct mlx5_core_dev *mdev = NULL;
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
	    ll != IB_LINK_LAYER_ETHERNET) {
		if (native_port_num)
			*native_port_num = ib_port_num;
		return ibdev->mdev;
	}

	if (native_port_num)
		*native_port_num = 1;

	port = &ibdev->port[ib_port_num - 1];
	if (!port)
		return NULL;

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
	if (mpi && !mpi->unaffiliate) {
		mdev = mpi->mdev;
		/* If it's the master no need to refcount, it'll exist
		 * as long as the ib_dev exists.
		 */
		if (!mpi->is_master)
			mpi->mdev_refcnt++;
	}
	spin_unlock(&port->mp.mpi_lock);

	return mdev;
}

void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  port_num);
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	port = &ibdev->port[port_num - 1];

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[port_num - 1].mp.mpi;
	if (mpi->is_master)
		goto out;

	mpi->mdev_refcnt--;
	if (mpi->unaffiliate)
		complete(&mpi->unref_comp);
out:
	spin_unlock(&port->mp.mpi_lock);
}
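
/*
 * Translate the PTYS eth_proto_oper bitmask reported by the device into the
 * closest IB (speed, width) pair, e.g. 40GBASE-* maps to QDR x4 and
 * 100GBASE-* maps to EDR x4.
 */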
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
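
/*
 * Fill ib_port_attr for a RoCE (Ethernet) port: speed/width are derived from
 * eth_proto_oper, while the MTU and ACTIVE/DOWN state come from the
 * underlying net_device (or its LAG master when LAG is active). For a port
 * that is not yet affiliated, a stub query against the master is returned.
 */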
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev;
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	bool put_mdev = true;
	u16 qkey_viol_cntr;
	u32 eth_prot_oper;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* This means the port isn't affiliated yet. Get the
		 * info for the master port instead.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
		port_num = 1;
	}

	/* Possible bad flows are checked before filling out props so in case
	 * of an error it will still be zeroed out.
	 */
	err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
					     mdev_port_num);
	if (err)
		goto out;

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_QDR;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width);

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->ip_gids = true;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = 3;

	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	/* If this is a stub query for an unaffiliated port stop here */
	if (!put_mdev)
		goto out;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		goto out;

	if (mlx5_lag_is_active(dev->mdev)) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
out:
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}
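
/*
 * Program one RoCE GID table entry in hardware: derive the RoCE version and
 * L3 type from the GID attributes, gather the MAC/VLAN from the associated
 * net_device, and push everything via mlx5_core_roce_gid_set(). Passing a
 * NULL gid (see mlx5_ib_del_gid below) clears the entry.
 */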
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
			 unsigned int index, const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	u8 roce_version = 0;
	u8 roce_l3_type = 0;
	bool vlan = false;
	u8 mac[ETH_ALEN];
	u16 vlan_id = 0;

	if (gid) {
		gid_type = attr->gid_type;
		ether_addr_copy(mac, attr->ndev->dev_addr);

		if (is_vlan_dev(attr->ndev)) {
			vlan = true;
			vlan_id = vlan_dev_vlan_id(attr->ndev);
		}
	}

	switch (gid_type) {
	case IB_GID_TYPE_IB:
		roce_version = MLX5_ROCE_VERSION_1;
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		roce_version = MLX5_ROCE_VERSION_2;
		if (ipv6_addr_v4mapped((void *)gid))
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
		else
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
		break;

	default:
		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
	}

	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
				      roce_l3_type, gid->raw, mac, vlan,
				      vlan_id, port_num);
}

static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, &attr->gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr)
{
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}
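
/*
 * Derive props->atomic_cap from the device atomic capabilities: report
 * IB_ATOMIC_HCA only when both 8-byte compare-and-swap and fetch-and-add are
 * supported and the device can respond in host endianness.
 */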
static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    u8 atomic_size_qp,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

	/* Check if HW supports 8-byte standard atomic operations and can
	 * respond in host endianness.
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;

	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}

static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

	get_atomic_caps(dev, atomic_size_qp, props);
}

static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);

	get_atomic_caps(dev, atomic_size_qp, props);
}

bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr props = {};

	get_atomic_caps_dc(dev, &props);
	return (props.atomic_cap == IB_ATOMIC_HCA) ? true : false;
}
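
/*
 * The mlx5_query_*() helpers below dispatch on the vport access method:
 * the MAD interface for IB ports without ib_virt, HCA vport commands for
 * other IB configurations, and NIC vport commands for Ethernet ports.
 */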
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8 desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}
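
/*
 * ib_device_attr query entry point. Capability flags and limits are filled
 * from firmware capability bits; the optional extended response fields
 * (TSO, RSS, CQE compression, packet pacing, multi-packet WQEs, SW parsing,
 * striding RQ, tunnel offloads) are reported only when the user response
 * buffer (uhw) is large enough to hold them.
 */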
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_sq_desc;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	bool raw_support = !mlx5_core_mp_enabled(mdev);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw->outlen && uhw->outlen < resp_len)
		return -EINVAL;
	else
		resp.response_length = resp_len;

	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
		if (MLX5_CAP_ETH(mdev, csum_cap)) {
			/* Legacy bit to support old userspace libraries */
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
		}

		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
			props->raw_packet_caps |=
				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
			resp.rss_caps.rx_hash_function =
				MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
				MLX5_RX_HASH_SRC_IPV4 |
				MLX5_RX_HASH_DST_IPV4 |
				MLX5_RX_HASH_SRC_IPV6 |
				MLX5_RX_HASH_DST_IPV6 |
				MLX5_RX_HASH_SRC_PORT_TCP |
				MLX5_RX_HASH_DST_PORT_TCP |
				MLX5_RX_HASH_SRC_PORT_UDP |
				MLX5_RX_HASH_DST_PORT_UDP |
				MLX5_RX_HASH_INNER;
			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
			    MLX5_ACCEL_IPSEC_CAP_DEVICE)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX5_RX_HASH_IPSEC_SPI;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
	    raw_support)
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
	    raw_support) {
		/* Legacy bit to support old userspace libraries */
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
		props->max_dm_size =
			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
	}

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	if (MLX5_CAP_GEN(mdev, end_pad))
		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

	props->vendor_part_id = mdev->pdev->device;
	props->hw_ver = mdev->pdev->revision;

	props->max_mr_size = ~0ull;
	props->page_size_cap = ~(min_page_size - 1);
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		sizeof(struct mlx5_wqe_data_seg);
	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
		     sizeof(struct mlx5_wqe_raddr_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_send_sge = max_sq_sg;
	props->max_recv_sge = max_rq_sg;
	props->max_sge_rd = MLX5_MAX_SGE_RD;
	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	get_atomic_caps_qp(dev, props);
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->max_ah = INT_MAX;
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(mdev, pg))
		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
	props->odp_caps = dev->odp_caps;
#endif

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (!mlx5_core_is_pf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET && raw_support) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (MLX5_CAP_GEN(mdev, tag_matching)) {
		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
		props->tm_caps.max_num_tags =
			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
		props->tm_caps.flags = IB_TM_CAP_RC;
		props->tm_caps.max_ops =
			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
	}

	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
		props->cq_caps.max_cq_moderation_count =
			MLX5_MAX_CQ_COUNT;
		props->cq_caps.max_cq_moderation_period =
			MLX5_MAX_CQ_PERIOD;
	}

	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
		resp.response_length += sizeof(resp.cqe_comp_caps);

		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
			resp.cqe_comp_caps.max_num =
				MLX5_CAP_GEN(dev->mdev,
					     cqe_compression_max_num);

			resp.cqe_comp_caps.supported_format =
				MLX5_IB_CQE_RES_FORMAT_HASH |
				MLX5_IB_CQE_RES_FORMAT_CSUM;

			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
				resp.cqe_comp_caps.supported_format |=
					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
		}
	}

	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
	    raw_support) {
		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
		    MLX5_CAP_GEN(mdev, qos)) {
			resp.packet_pacing_caps.qp_rate_limit_max =
				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
			resp.packet_pacing_caps.qp_rate_limit_min =
				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
			resp.packet_pacing_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
				resp.packet_pacing_caps.cap_flags |=
					MLX5_IB_PP_SUPPORT_BURST;
		}
		resp.response_length += sizeof(resp.packet_pacing_caps);
	}

	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
			uhw->outlen)) {
		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes =
				MLX5_IB_ALLOW_MPW;

		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes |=
				MLX5_IB_SUPPORT_EMPW;

		resp.response_length +=
			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
	}

	if (field_avail(typeof(resp), flags, uhw->outlen)) {
		resp.response_length += sizeof(resp.flags);

		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

		if (MLX5_CAP_GEN(mdev, cqe_128_always))
			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
	}

	if (field_avail(typeof(resp), sw_parsing_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.sw_parsing_caps);
		if (MLX5_CAP_ETH(mdev, swp)) {
			resp.sw_parsing_caps.sw_parsing_offloads |=
				MLX5_IB_SW_PARSING;

			if (MLX5_CAP_ETH(mdev, swp_csum))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_CSUM;

			if (MLX5_CAP_ETH(mdev, swp_lso))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_LSO;

			if (resp.sw_parsing_caps.sw_parsing_offloads)
				resp.sw_parsing_caps.supported_qpts =
					BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
	    raw_support) {
		resp.response_length += sizeof(resp.striding_rq_caps);
		if (MLX5_CAP_GEN(mdev, striding_rq)) {
			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.supported_qpts =
				BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), tunnel_offloads_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.tunnel_offloads_caps);
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_GRE)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_UDP)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}
  930. enum mlx5_ib_width {
  931. MLX5_IB_WIDTH_1X = 1 << 0,
  932. MLX5_IB_WIDTH_2X = 1 << 1,
  933. MLX5_IB_WIDTH_4X = 1 << 2,
  934. MLX5_IB_WIDTH_8X = 1 << 3,
  935. MLX5_IB_WIDTH_12X = 1 << 4
  936. };
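/* Translate the HCA's active link width bitmask (MLX5_IB_WIDTH_*) into
 * the IB verbs width enum; 2X is rejected since it has no IB-spec
 * encoding handled here.
 */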
  937. static int translate_active_width(struct ib_device *ibdev, u8 active_width,
  938. u8 *ib_width)
  939. {
  940. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  941. int err = 0;
  942. if (active_width & MLX5_IB_WIDTH_1X) {
  943. *ib_width = IB_WIDTH_1X;
  944. } else if (active_width & MLX5_IB_WIDTH_2X) {
  945. mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
  946. (int)active_width);
  947. err = -EINVAL;
  948. } else if (active_width & MLX5_IB_WIDTH_4X) {
  949. *ib_width = IB_WIDTH_4X;
  950. } else if (active_width & MLX5_IB_WIDTH_8X) {
  951. *ib_width = IB_WIDTH_8X;
  952. } else if (active_width & MLX5_IB_WIDTH_12X) {
  953. *ib_width = IB_WIDTH_12X;
  954. } else {
  955. mlx5_ib_dbg(dev, "Invalid active_width %d\n",
  956. (int)active_width);
  957. err = -EINVAL;
  958. }
  959. return err;
  960. }
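/* Map an MTU in bytes to its ib_mtu encoding (256 -> 1 ... 4096 -> 5),
 * or -1 for an unexpected value.
 */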
  961. static int mlx5_mtu_to_ib_mtu(int mtu)
  962. {
  963. switch (mtu) {
  964. case 256: return 1;
  965. case 512: return 2;
  966. case 1024: return 3;
  967. case 2048: return 4;
  968. case 4096: return 5;
  969. default:
  970. pr_warn("invalid mtu\n");
  971. return -1;
  972. }
  973. }
  974. enum ib_max_vl_num {
  975. __IB_MAX_VL_0 = 1,
  976. __IB_MAX_VL_0_1 = 2,
  977. __IB_MAX_VL_0_3 = 3,
  978. __IB_MAX_VL_0_7 = 4,
  979. __IB_MAX_VL_0_14 = 5,
  980. };
  981. enum mlx5_vl_hw_cap {
  982. MLX5_VL_HW_0 = 1,
  983. MLX5_VL_HW_0_1 = 2,
  984. MLX5_VL_HW_0_2 = 3,
  985. MLX5_VL_HW_0_3 = 4,
  986. MLX5_VL_HW_0_4 = 5,
  987. MLX5_VL_HW_0_5 = 6,
  988. MLX5_VL_HW_0_6 = 7,
  989. MLX5_VL_HW_0_7 = 8,
  990. MLX5_VL_HW_0_14 = 15
  991. };
  992. static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
  993. u8 *max_vl_num)
  994. {
  995. switch (vl_hw_cap) {
  996. case MLX5_VL_HW_0:
  997. *max_vl_num = __IB_MAX_VL_0;
  998. break;
  999. case MLX5_VL_HW_0_1:
  1000. *max_vl_num = __IB_MAX_VL_0_1;
  1001. break;
  1002. case MLX5_VL_HW_0_3:
  1003. *max_vl_num = __IB_MAX_VL_0_3;
  1004. break;
  1005. case MLX5_VL_HW_0_7:
  1006. *max_vl_num = __IB_MAX_VL_0_7;
  1007. break;
  1008. case MLX5_VL_HW_0_14:
  1009. *max_vl_num = __IB_MAX_VL_0_14;
  1010. break;
  1011. default:
  1012. return -EINVAL;
  1013. }
  1014. return 0;
  1015. }
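/* Query port attributes for native IB ports by reading the HCA vport
 * context plus the operational link width, speed, MTU and VL capability.
 */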
  1016. static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
  1017. struct ib_port_attr *props)
  1018. {
  1019. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  1020. struct mlx5_core_dev *mdev = dev->mdev;
  1021. struct mlx5_hca_vport_context *rep;
  1022. u16 max_mtu;
  1023. u16 oper_mtu;
  1024. int err;
  1025. u8 ib_link_width_oper;
  1026. u8 vl_hw_cap;
  1027. rep = kzalloc(sizeof(*rep), GFP_KERNEL);
  1028. if (!rep) {
  1029. err = -ENOMEM;
  1030. goto out;
  1031. }
  1032. /* props is zeroed by the caller; avoid zeroing it here */
  1033. err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
  1034. if (err)
  1035. goto out;
  1036. props->lid = rep->lid;
  1037. props->lmc = rep->lmc;
  1038. props->sm_lid = rep->sm_lid;
  1039. props->sm_sl = rep->sm_sl;
  1040. props->state = rep->vport_state;
  1041. props->phys_state = rep->port_physical_state;
  1042. props->port_cap_flags = rep->cap_mask1;
  1043. props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
  1044. props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
  1045. props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
  1046. props->bad_pkey_cntr = rep->pkey_violation_counter;
  1047. props->qkey_viol_cntr = rep->qkey_violation_counter;
  1048. props->subnet_timeout = rep->subnet_timeout;
  1049. props->init_type_reply = rep->init_type_reply;
  1050. err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
  1051. if (err)
  1052. goto out;
  1053. err = translate_active_width(ibdev, ib_link_width_oper,
  1054. &props->active_width);
  1055. if (err)
  1056. goto out;
  1057. err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
  1058. if (err)
  1059. goto out;
  1060. mlx5_query_port_max_mtu(mdev, &max_mtu, port);
  1061. props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
  1062. mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
  1063. props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
  1064. err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
  1065. if (err)
  1066. goto out;
  1067. err = translate_max_vl_num(ibdev, vl_hw_cap,
  1068. &props->max_vl_num);
  1069. out:
  1070. kfree(rep);
  1071. return err;
  1072. }
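/* ib_query_port() entry point: dispatch to the MAD, HCA or NIC (RoCE)
 * query path according to the vport access method, then subtract the
 * reserved GIDs from the reported GID table length.
 */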
  1073. int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
  1074. struct ib_port_attr *props)
  1075. {
  1076. unsigned int count;
  1077. int ret;
  1078. switch (mlx5_get_vport_access_method(ibdev)) {
  1079. case MLX5_VPORT_ACCESS_METHOD_MAD:
  1080. ret = mlx5_query_mad_ifc_port(ibdev, port, props);
  1081. break;
  1082. case MLX5_VPORT_ACCESS_METHOD_HCA:
  1083. ret = mlx5_query_hca_port(ibdev, port, props);
  1084. break;
  1085. case MLX5_VPORT_ACCESS_METHOD_NIC:
  1086. ret = mlx5_query_port_roce(ibdev, port, props);
  1087. break;
  1088. default:
  1089. ret = -EINVAL;
  1090. }
  1091. if (!ret && props) {
  1092. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  1093. struct mlx5_core_dev *mdev;
  1094. bool put_mdev = true;
  1095. mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
  1096. if (!mdev) {
  1097. /* If the port isn't affiliated yet, query the master.
  1098. * The master and slave will have the same values.
  1099. */
  1100. mdev = dev->mdev;
  1101. port = 1;
  1102. put_mdev = false;
  1103. }
  1104. count = mlx5_core_reserved_gids_count(mdev);
  1105. if (put_mdev)
  1106. mlx5_ib_put_native_port_mdev(dev, port);
  1107. props->gid_tbl_len -= count;
  1108. }
  1109. return ret;
  1110. }
  1111. static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
  1112. struct ib_port_attr *props)
  1113. {
  1114. int ret;
  1115. /* Only link layer == ethernet is valid for representors */
  1116. ret = mlx5_query_port_roce(ibdev, port, props);
  1117. if (ret || !props)
  1118. return ret;
  1119. /* We don't support GIDs */
  1120. props->gid_tbl_len = 0;
  1121. return ret;
  1122. }
  1123. static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
  1124. union ib_gid *gid)
  1125. {
  1126. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  1127. struct mlx5_core_dev *mdev = dev->mdev;
  1128. switch (mlx5_get_vport_access_method(ibdev)) {
  1129. case MLX5_VPORT_ACCESS_METHOD_MAD:
  1130. return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
  1131. case MLX5_VPORT_ACCESS_METHOD_HCA:
  1132. return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
  1133. default:
  1134. return -EINVAL;
  1135. }
  1136. }
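/* Read a PKey table entry through the native port's mdev when the port
 * is affiliated, or through the master device otherwise.
 */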
  1137. static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
  1138. u16 index, u16 *pkey)
  1139. {
  1140. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  1141. struct mlx5_core_dev *mdev;
  1142. bool put_mdev = true;
  1143. u8 mdev_port_num;
  1144. int err;
  1145. mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
  1146. if (!mdev) {
  1147. /* The port isn't affiliated yet; get the PKey from the master
  1148. * port. For RoCE the PKey tables will be the same.
  1149. */
  1150. put_mdev = false;
  1151. mdev = dev->mdev;
  1152. mdev_port_num = 1;
  1153. }
  1154. err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
  1155. index, pkey);
  1156. if (put_mdev)
  1157. mlx5_ib_put_native_port_mdev(dev, port);
  1158. return err;
  1159. }
  1160. static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
  1161. u16 *pkey)
  1162. {
  1163. switch (mlx5_get_vport_access_method(ibdev)) {
  1164. case MLX5_VPORT_ACCESS_METHOD_MAD:
  1165. return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
  1166. case MLX5_VPORT_ACCESS_METHOD_HCA:
  1167. case MLX5_VPORT_ACCESS_METHOD_NIC:
  1168. return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
  1169. default:
  1170. return -EINVAL;
  1171. }
  1172. }
  1173. static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
  1174. struct ib_device_modify *props)
  1175. {
  1176. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  1177. struct mlx5_reg_node_desc in;
  1178. struct mlx5_reg_node_desc out;
  1179. int err;
  1180. if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
  1181. return -EOPNOTSUPP;
  1182. if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
  1183. return 0;
  1184. /*
  1185. * If possible, pass node desc to FW, so it can generate
  1186. * a 144 trap. If cmd fails, just ignore.
  1187. */
  1188. memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
  1189. err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
  1190. sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
  1191. if (err)
  1192. return err;
  1193. memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
  1194. return err;
  1195. }
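/* Update port capability bits through the HCA vport context, after
 * checking that every bit in @mask is changeable per cap_mask1_perm.
 */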
  1196. static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
  1197. u32 value)
  1198. {
  1199. struct mlx5_hca_vport_context ctx = {};
  1200. struct mlx5_core_dev *mdev;
  1201. u8 mdev_port_num;
  1202. int err;
  1203. mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
  1204. if (!mdev)
  1205. return -ENODEV;
  1206. err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
  1207. if (err)
  1208. goto out;
  1209. if (~ctx.cap_mask1_perm & mask) {
  1210. mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
  1211. mask, ctx.cap_mask1_perm);
  1212. err = -EINVAL;
  1213. goto out;
  1214. }
  1215. ctx.cap_mask1 = value;
  1216. ctx.cap_mask1_perm = mask;
  1217. err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
  1218. 0, &ctx);
  1219. out:
  1220. mlx5_ib_put_native_port_mdev(dev, port_num);
  1221. return err;
  1222. }
  1223. static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
  1224. struct ib_port_modify *props)
  1225. {
  1226. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  1227. struct ib_port_attr attr;
  1228. u32 tmp;
  1229. int err;
  1230. u32 change_mask;
  1231. u32 value;
  1232. bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
  1233. IB_LINK_LAYER_INFINIBAND);
  1234. /* CM layer calls ib_modify_port() regardless of the link layer. For
  1235. * Ethernet ports, qkey violation and Port capabilities are meaningless.
  1236. */
  1237. if (!is_ib)
  1238. return 0;
  1239. if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
  1240. change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
  1241. value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
  1242. return set_port_caps_atomic(dev, port, change_mask, value);
  1243. }
  1244. mutex_lock(&dev->cap_mask_mutex);
  1245. err = ib_query_port(ibdev, port, &attr);
  1246. if (err)
  1247. goto out;
  1248. tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
  1249. ~props->clr_port_cap_mask;
  1250. err = mlx5_set_port_caps(dev->mdev, port, tmp);
  1251. out:
  1252. mutex_unlock(&dev->cap_mask_mutex);
  1253. return err;
  1254. }
  1255. static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
  1256. {
  1257. mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
  1258. caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
  1259. }
  1260. static u16 calc_dynamic_bfregs(int uars_per_sys_page)
  1261. {
  1262. /* A large system page without 4K UAR support might limit the dynamic size */
  1263. if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
  1264. return MLX5_MIN_DYN_BFREGS;
  1265. return MLX5_MAX_DYN_BFREGS;
  1266. }
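/* Validate the user's bfreg request and work out the static and dynamic
 * bfreg counts along with the number of UAR system pages they need.
 */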
  1267. static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
  1268. struct mlx5_ib_alloc_ucontext_req_v2 *req,
  1269. struct mlx5_bfreg_info *bfregi)
  1270. {
  1271. int uars_per_sys_page;
  1272. int bfregs_per_sys_page;
  1273. int ref_bfregs = req->total_num_bfregs;
  1274. if (req->total_num_bfregs == 0)
  1275. return -EINVAL;
  1276. BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
  1277. BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
  1278. if (req->total_num_bfregs > MLX5_MAX_BFREGS)
  1279. return -ENOMEM;
  1280. uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
  1281. bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
  1282. /* This holds the static allocation requested by the user */
  1283. req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
  1284. if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
  1285. return -EINVAL;
  1286. bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
  1287. bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
  1288. bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
  1289. bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
  1290. mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
  1291. MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
  1292. lib_uar_4k ? "yes" : "no", ref_bfregs,
  1293. req->total_num_bfregs, bfregi->total_num_bfregs,
  1294. bfregi->num_sys_pages);
  1295. return 0;
  1296. }
  1297. static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
  1298. {
  1299. struct mlx5_bfreg_info *bfregi;
  1300. int err;
  1301. int i;
  1302. bfregi = &context->bfregi;
  1303. for (i = 0; i < bfregi->num_static_sys_pages; i++) {
  1304. err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
  1305. if (err)
  1306. goto error;
  1307. mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
  1308. }
  1309. for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
  1310. bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
  1311. return 0;
  1312. error:
  1313. for (--i; i >= 0; i--)
  1314. if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
  1315. mlx5_ib_warn(dev, "failed to free uar %d\n", i);
  1316. return err;
  1317. }
  1318. static void deallocate_uars(struct mlx5_ib_dev *dev,
  1319. struct mlx5_ib_ucontext *context)
  1320. {
  1321. struct mlx5_bfreg_info *bfregi;
  1322. int i;
  1323. bfregi = &context->bfregi;
  1324. for (i = 0; i < bfregi->num_sys_pages; i++)
  1325. if (i < bfregi->num_static_sys_pages ||
  1326. bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
  1327. mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
  1328. }
  1329. static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
  1330. {
  1331. int err;
  1332. if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
  1333. return 0;
  1334. err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
  1335. if (err)
  1336. return err;
  1337. if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
  1338. (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
  1339. !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
  1340. return err;
  1341. mutex_lock(&dev->lb_mutex);
  1342. dev->user_td++;
  1343. if (dev->user_td == 2)
  1344. err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
  1345. mutex_unlock(&dev->lb_mutex);
  1346. return err;
  1347. }
  1348. static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
  1349. {
  1350. if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
  1351. return;
  1352. mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
  1353. if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
  1354. (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
  1355. !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
  1356. return;
  1357. mutex_lock(&dev->lb_mutex);
  1358. dev->user_td--;
  1359. if (dev->user_td < 2)
  1360. mlx5_nic_vport_update_local_lb(dev->mdev, false);
  1361. mutex_unlock(&dev->lb_mutex);
  1362. }
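/* Allocate a user context: parse the v0/v2 request, size and allocate
 * bfregs/UARs, set up a transport domain (and DEVX if requested), and
 * return the device limits to user space in the response.
 */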
  1363. static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
  1364. struct ib_udata *udata)
  1365. {
  1366. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  1367. struct mlx5_ib_alloc_ucontext_req_v2 req = {};
  1368. struct mlx5_ib_alloc_ucontext_resp resp = {};
  1369. struct mlx5_core_dev *mdev = dev->mdev;
  1370. struct mlx5_ib_ucontext *context;
  1371. struct mlx5_bfreg_info *bfregi;
  1372. int ver;
  1373. int err;
  1374. size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
  1375. max_cqe_version);
  1376. u32 dump_fill_mkey;
  1377. bool lib_uar_4k;
  1378. if (!dev->ib_active)
  1379. return ERR_PTR(-EAGAIN);
  1380. if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
  1381. ver = 0;
  1382. else if (udata->inlen >= min_req_v2)
  1383. ver = 2;
  1384. else
  1385. return ERR_PTR(-EINVAL);
  1386. err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
  1387. if (err)
  1388. return ERR_PTR(err);
  1389. if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
  1390. return ERR_PTR(-EOPNOTSUPP);
  1391. if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
  1392. return ERR_PTR(-EOPNOTSUPP);
  1393. req.total_num_bfregs = ALIGN(req.total_num_bfregs,
  1394. MLX5_NON_FP_BFREGS_PER_UAR);
  1395. if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
  1396. return ERR_PTR(-EINVAL);
  1397. resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
  1398. if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
  1399. resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
  1400. resp.cache_line_size = cache_line_size();
  1401. resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
  1402. resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
  1403. resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
  1404. resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
  1405. resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
  1406. resp.cqe_version = min_t(__u8,
  1407. (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
  1408. req.max_cqe_version);
  1409. resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
  1410. MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
  1411. resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
  1412. MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
  1413. resp.response_length = min(offsetof(typeof(resp), response_length) +
  1414. sizeof(resp.response_length), udata->outlen);
  1415. if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
  1416. if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
  1417. resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
  1418. if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
  1419. resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
  1420. if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
  1421. resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
  1422. if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
  1423. resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
  1424. /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
  1425. }
  1426. context = kzalloc(sizeof(*context), GFP_KERNEL);
  1427. if (!context)
  1428. return ERR_PTR(-ENOMEM);
  1429. lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
  1430. bfregi = &context->bfregi;
  1431. /* updates req->total_num_bfregs */
  1432. err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
  1433. if (err)
  1434. goto out_ctx;
  1435. mutex_init(&bfregi->lock);
  1436. bfregi->lib_uar_4k = lib_uar_4k;
  1437. bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
  1438. GFP_KERNEL);
  1439. if (!bfregi->count) {
  1440. err = -ENOMEM;
  1441. goto out_ctx;
  1442. }
  1443. bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
  1444. sizeof(*bfregi->sys_pages),
  1445. GFP_KERNEL);
  1446. if (!bfregi->sys_pages) {
  1447. err = -ENOMEM;
  1448. goto out_count;
  1449. }
  1450. err = allocate_uars(dev, context);
  1451. if (err)
  1452. goto out_sys_pages;
  1453. #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
  1454. context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
  1455. #endif
  1456. err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
  1457. if (err)
  1458. goto out_uars;
  1459. if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
  1460. /* Block DEVX when the link layer is InfiniBand, due to SELinux */
  1461. if (mlx5_ib_port_link_layer(ibdev, 1) != IB_LINK_LAYER_ETHERNET) {
  1462. err = -EPERM;
  1463. goto out_td;
  1464. }
  1465. err = mlx5_ib_devx_create(dev, context);
  1466. if (err)
  1467. goto out_td;
  1468. }
  1469. if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
  1470. err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
  1471. if (err)
  1472. goto out_mdev;
  1473. }
  1474. INIT_LIST_HEAD(&context->vma_private_list);
  1475. mutex_init(&context->vma_private_list_mutex);
  1476. INIT_LIST_HEAD(&context->db_page_list);
  1477. mutex_init(&context->db_page_mutex);
  1478. resp.tot_bfregs = req.total_num_bfregs;
  1479. resp.num_ports = dev->num_ports;
  1480. if (field_avail(typeof(resp), cqe_version, udata->outlen))
  1481. resp.response_length += sizeof(resp.cqe_version);
  1482. if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
  1483. resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
  1484. MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
  1485. resp.response_length += sizeof(resp.cmds_supp_uhw);
  1486. }
  1487. if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
  1488. if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
  1489. mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
  1490. resp.eth_min_inline++;
  1491. }
  1492. resp.response_length += sizeof(resp.eth_min_inline);
  1493. }
  1494. if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
  1495. if (mdev->clock_info)
  1496. resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
  1497. resp.response_length += sizeof(resp.clock_info_versions);
  1498. }
  1499. /*
  1500. * We don't want to expose information from the PCI bar that is located
  1501. * after 4096 bytes, so if the arch only supports larger pages, let's
  1502. * pretend we don't support reading the HCA's core clock. This is also
  1503. * enforced by the mmap function.
  1504. */
  1505. if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
  1506. if (PAGE_SIZE <= 4096) {
  1507. resp.comp_mask |=
  1508. MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
  1509. resp.hca_core_clock_offset =
  1510. offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
  1511. }
  1512. resp.response_length += sizeof(resp.hca_core_clock_offset);
  1513. }
  1514. if (field_avail(typeof(resp), log_uar_size, udata->outlen))
  1515. resp.response_length += sizeof(resp.log_uar_size);
  1516. if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
  1517. resp.response_length += sizeof(resp.num_uars_per_page);
  1518. if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
  1519. resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
  1520. resp.response_length += sizeof(resp.num_dyn_bfregs);
  1521. }
  1522. if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
  1523. if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
  1524. resp.dump_fill_mkey = dump_fill_mkey;
  1525. resp.comp_mask |=
  1526. MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
  1527. }
  1528. resp.response_length += sizeof(resp.dump_fill_mkey);
  1529. }
  1530. err = ib_copy_to_udata(udata, &resp, resp.response_length);
  1531. if (err)
  1532. goto out_mdev;
  1533. bfregi->ver = ver;
  1534. bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
  1535. context->cqe_version = resp.cqe_version;
  1536. context->lib_caps = req.lib_caps;
  1537. print_lib_caps(dev, context->lib_caps);
  1538. return &context->ibucontext;
  1539. out_mdev:
  1540. if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
  1541. mlx5_ib_devx_destroy(dev, context);
  1542. out_td:
  1543. mlx5_ib_dealloc_transport_domain(dev, context->tdn);
  1544. out_uars:
  1545. deallocate_uars(dev, context);
  1546. out_sys_pages:
  1547. kfree(bfregi->sys_pages);
  1548. out_count:
  1549. kfree(bfregi->count);
  1550. out_ctx:
  1551. kfree(context);
  1552. return ERR_PTR(err);
  1553. }
  1554. static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
  1555. {
  1556. struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
  1557. struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
  1558. struct mlx5_bfreg_info *bfregi;
  1559. if (context->devx_uid)
  1560. mlx5_ib_devx_destroy(dev, context);
  1561. bfregi = &context->bfregi;
  1562. mlx5_ib_dealloc_transport_domain(dev, context->tdn);
  1563. deallocate_uars(dev, context);
  1564. kfree(bfregi->sys_pages);
  1565. kfree(bfregi->count);
  1566. kfree(context);
  1567. return 0;
  1568. }
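/* Translate a UAR index into the PFN of its page within BAR 0. */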
  1569. static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
  1570. int uar_idx)
  1571. {
  1572. int fw_uars_per_page;
  1573. fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
  1574. return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
  1575. }
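/* The mmap offset encodes a command in the bits above
 * MLX5_IB_MMAP_CMD_SHIFT and an argument (e.g. a UAR or page index) in
 * the bits below it.
 */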
  1576. static int get_command(unsigned long offset)
  1577. {
  1578. return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
  1579. }
  1580. static int get_arg(unsigned long offset)
  1581. {
  1582. return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
  1583. }
  1584. static int get_index(unsigned long offset)
  1585. {
  1586. return get_arg(offset);
  1587. }
  1588. /* Index resides in an extra byte to enable larger values than 255 */
  1589. static int get_extended_index(unsigned long offset)
  1590. {
  1591. return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
  1592. }
  1593. static void mlx5_ib_vma_open(struct vm_area_struct *area)
  1594. {
  1595. /* vma_open is called when a new VMA is created on top of our VMA. This
  1596. * is done through either mremap flow or split_vma (usually due to
  1597. * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
  1598. * as this VMA is strongly hardware related. Therefore we set the
  1599. * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
  1600. * calling us again and trying to do incorrect actions. We assume that
  1601. * the original VMA size is exactly a single page, and therefore no
  1602. * "splitting" operation will happen to it.
  1603. */
  1604. area->vm_ops = NULL;
  1605. }
  1606. static void mlx5_ib_vma_close(struct vm_area_struct *area)
  1607. {
  1608. struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
  1609. /* It's guaranteed that all VMAs opened on a FD are closed before the
  1610. * file itself is closed, therefore no sync is needed with the regular
  1611. * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
  1612. * However, a sync is needed with accesses to the vma as part of
  1613. * mlx5_ib_disassociate_ucontext.
  1614. * The close operation is usually called under mm->mmap_sem except when
  1615. * process is exiting.
  1616. * The exiting case is handled explicitly as part of
  1617. * mlx5_ib_disassociate_ucontext.
  1618. */
  1619. mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
  1620. /* Set the vma context pointer to NULL in the mlx5_ib driver's
  1621. * private data to protect against a race with
  1622. * mlx5_ib_disassociate_ucontext().
  1623. */
  1624. mlx5_ib_vma_priv_data->vma = NULL;
  1625. mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
  1626. list_del(&mlx5_ib_vma_priv_data->list);
  1627. mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
  1628. kfree(mlx5_ib_vma_priv_data);
  1629. }
  1630. static const struct vm_operations_struct mlx5_ib_vm_ops = {
  1631. .open = mlx5_ib_vma_open,
  1632. .close = mlx5_ib_vma_close
  1633. };
  1634. static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
  1635. struct mlx5_ib_ucontext *ctx)
  1636. {
  1637. struct mlx5_ib_vma_private_data *vma_prv;
  1638. struct list_head *vma_head = &ctx->vma_private_list;
  1639. vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
  1640. if (!vma_prv)
  1641. return -ENOMEM;
  1642. vma_prv->vma = vma;
  1643. vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
  1644. vma->vm_private_data = vma_prv;
  1645. vma->vm_ops = &mlx5_ib_vm_ops;
  1646. mutex_lock(&ctx->vma_private_list_mutex);
  1647. list_add(&vma_prv->list, vma_head);
  1648. mutex_unlock(&ctx->vma_private_list_mutex);
  1649. return 0;
  1650. }
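/* Tear down every user mapping of this context: zap the PTEs and detach
 * our vm_ops so subsequent VMA operations do not call back into the
 * driver.
 */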
  1651. static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
  1652. {
  1653. struct vm_area_struct *vma;
  1654. struct mlx5_ib_vma_private_data *vma_private, *n;
  1655. struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
  1656. mutex_lock(&context->vma_private_list_mutex);
  1657. list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
  1658. list) {
  1659. vma = vma_private->vma;
  1660. zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
  1661. /* The context is going to be destroyed; vm_ops should
  1662. * not be accessed any more.
  1663. */
  1664. vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
  1665. vma->vm_ops = NULL;
  1666. list_del(&vma_private->list);
  1667. kfree(vma_private);
  1668. }
  1669. mutex_unlock(&context->vma_private_list_mutex);
  1670. }
  1671. static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
  1672. {
  1673. switch (cmd) {
  1674. case MLX5_IB_MMAP_WC_PAGE:
  1675. return "WC";
  1676. case MLX5_IB_MMAP_REGULAR_PAGE:
  1677. return "best effort WC";
  1678. case MLX5_IB_MMAP_NC_PAGE:
  1679. return "NC";
  1680. case MLX5_IB_MMAP_DEVICE_MEM:
  1681. return "Device Memory";
  1682. default:
  1683. return NULL;
  1684. }
  1685. }
  1686. static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
  1687. struct vm_area_struct *vma,
  1688. struct mlx5_ib_ucontext *context)
  1689. {
  1690. phys_addr_t pfn;
  1691. int err;
  1692. if (vma->vm_end - vma->vm_start != PAGE_SIZE)
  1693. return -EINVAL;
  1694. if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
  1695. return -EOPNOTSUPP;
  1696. if (vma->vm_flags & VM_WRITE)
  1697. return -EPERM;
  1698. if (!dev->mdev->clock_info_page)
  1699. return -EOPNOTSUPP;
  1700. pfn = page_to_pfn(dev->mdev->clock_info_page);
  1701. err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
  1702. vma->vm_page_prot);
  1703. if (err)
  1704. return err;
  1705. return mlx5_ib_set_vma_data(vma, context);
  1706. }
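/* Map a single UAR page (WC, NC or best-effort WC) to user space; for
 * MLX5_IB_MMAP_ALLOC_WC a UAR is first allocated dynamically.
 */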
  1707. static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
  1708. struct vm_area_struct *vma,
  1709. struct mlx5_ib_ucontext *context)
  1710. {
  1711. struct mlx5_bfreg_info *bfregi = &context->bfregi;
  1712. int err;
  1713. unsigned long idx;
  1714. phys_addr_t pfn;
  1715. pgprot_t prot;
  1716. u32 bfreg_dyn_idx = 0;
  1717. u32 uar_index;
  1718. int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
  1719. int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
  1720. bfregi->num_static_sys_pages;
  1721. if (vma->vm_end - vma->vm_start != PAGE_SIZE)
  1722. return -EINVAL;
  1723. if (dyn_uar)
  1724. idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
  1725. else
  1726. idx = get_index(vma->vm_pgoff);
  1727. if (idx >= max_valid_idx) {
  1728. mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
  1729. idx, max_valid_idx);
  1730. return -EINVAL;
  1731. }
  1732. switch (cmd) {
  1733. case MLX5_IB_MMAP_WC_PAGE:
  1734. case MLX5_IB_MMAP_ALLOC_WC:
  1735. /* Some architectures don't support WC memory */
  1736. #if defined(CONFIG_X86)
  1737. if (!pat_enabled())
  1738. return -EPERM;
  1739. #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
  1740. return -EPERM;
  1741. #endif
  1742. /* fall through */
  1743. case MLX5_IB_MMAP_REGULAR_PAGE:
  1744. /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
  1745. prot = pgprot_writecombine(vma->vm_page_prot);
  1746. break;
  1747. case MLX5_IB_MMAP_NC_PAGE:
  1748. prot = pgprot_noncached(vma->vm_page_prot);
  1749. break;
  1750. default:
  1751. return -EINVAL;
  1752. }
  1753. if (dyn_uar) {
  1754. int uars_per_page;
  1755. uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
  1756. bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
  1757. if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
  1758. mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
  1759. bfreg_dyn_idx, bfregi->total_num_bfregs);
  1760. return -EINVAL;
  1761. }
  1762. mutex_lock(&bfregi->lock);
  1763. /* Fail if uar already allocated, first bfreg index of each
  1764. * page holds its count.
  1765. */
  1766. if (bfregi->count[bfreg_dyn_idx]) {
  1767. mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
  1768. mutex_unlock(&bfregi->lock);
  1769. return -EINVAL;
  1770. }
  1771. bfregi->count[bfreg_dyn_idx]++;
  1772. mutex_unlock(&bfregi->lock);
  1773. err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
  1774. if (err) {
  1775. mlx5_ib_warn(dev, "UAR alloc failed\n");
  1776. goto free_bfreg;
  1777. }
  1778. } else {
  1779. uar_index = bfregi->sys_pages[idx];
  1780. }
  1781. pfn = uar_index2pfn(dev, uar_index);
  1782. mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
  1783. vma->vm_page_prot = prot;
  1784. err = io_remap_pfn_range(vma, vma->vm_start, pfn,
  1785. PAGE_SIZE, vma->vm_page_prot);
  1786. if (err) {
  1787. mlx5_ib_err(dev,
  1788. "io_remap_pfn_range failed with error=%d, mmap_cmd=%s\n",
  1789. err, mmap_cmd2str(cmd));
  1790. err = -EAGAIN;
  1791. goto err;
  1792. }
  1793. err = mlx5_ib_set_vma_data(vma, context);
  1794. if (err)
  1795. goto err;
  1796. if (dyn_uar)
  1797. bfregi->sys_pages[idx] = uar_index;
  1798. return 0;
  1799. err:
  1800. if (!dyn_uar)
  1801. return err;
  1802. mlx5_cmd_free_uar(dev->mdev, uar_index);
  1803. free_bfreg:
  1804. mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
  1805. return err;
  1806. }
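/* Map device memory (MEMIC) pages, after verifying that every page in
 * the requested range was allocated to this context.
 */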
  1807. static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
  1808. {
  1809. struct mlx5_ib_ucontext *mctx = to_mucontext(context);
  1810. struct mlx5_ib_dev *dev = to_mdev(context->device);
  1811. u16 page_idx = get_extended_index(vma->vm_pgoff);
  1812. size_t map_size = vma->vm_end - vma->vm_start;
  1813. u32 npages = map_size >> PAGE_SHIFT;
  1814. phys_addr_t pfn;
  1815. pgprot_t prot;
  1816. if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
  1817. page_idx + npages)
  1818. return -EINVAL;
  1819. pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
  1820. MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
  1821. PAGE_SHIFT) +
  1822. page_idx;
  1823. prot = pgprot_writecombine(vma->vm_page_prot);
  1824. vma->vm_page_prot = prot;
  1825. if (io_remap_pfn_range(vma, vma->vm_start, pfn, map_size,
  1826. vma->vm_page_prot))
  1827. return -EAGAIN;
  1828. return mlx5_ib_set_vma_data(vma, mctx);
  1829. }
  1830. static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
  1831. {
  1832. struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
  1833. struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
  1834. unsigned long command;
  1835. phys_addr_t pfn;
  1836. command = get_command(vma->vm_pgoff);
  1837. switch (command) {
  1838. case MLX5_IB_MMAP_WC_PAGE:
  1839. case MLX5_IB_MMAP_NC_PAGE:
  1840. case MLX5_IB_MMAP_REGULAR_PAGE:
  1841. case MLX5_IB_MMAP_ALLOC_WC:
  1842. return uar_mmap(dev, command, vma, context);
  1843. case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
  1844. return -ENOSYS;
  1845. case MLX5_IB_MMAP_CORE_CLOCK:
  1846. if (vma->vm_end - vma->vm_start != PAGE_SIZE)
  1847. return -EINVAL;
  1848. if (vma->vm_flags & VM_WRITE)
  1849. return -EPERM;
  1850. /* Don't expose to user-space information it shouldn't have */
  1851. if (PAGE_SIZE > 4096)
  1852. return -EOPNOTSUPP;
  1853. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  1854. pfn = (dev->mdev->iseg_base +
  1855. offsetof(struct mlx5_init_seg, internal_timer_h)) >>
  1856. PAGE_SHIFT;
  1857. if (io_remap_pfn_range(vma, vma->vm_start, pfn,
  1858. PAGE_SIZE, vma->vm_page_prot))
  1859. return -EAGAIN;
  1860. break;
  1861. case MLX5_IB_MMAP_CLOCK_INFO:
  1862. return mlx5_ib_mmap_clock_info_page(dev, vma, context);
  1863. case MLX5_IB_MMAP_DEVICE_MEM:
  1864. return dm_mmap(ibcontext, vma);
  1865. default:
  1866. return -EINVAL;
  1867. }
  1868. return 0;
  1869. }
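/* Allocate device memory (MEMIC): round the length up to the MEMIC base
 * granularity and return the start offset and page index needed for a
 * later MLX5_IB_MMAP_DEVICE_MEM mmap.
 */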
  1870. struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
  1871. struct ib_ucontext *context,
  1872. struct ib_dm_alloc_attr *attr,
  1873. struct uverbs_attr_bundle *attrs)
  1874. {
  1875. u64 act_size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
  1876. struct mlx5_memic *memic = &to_mdev(ibdev)->memic;
  1877. phys_addr_t memic_addr;
  1878. struct mlx5_ib_dm *dm;
  1879. u64 start_offset;
  1880. u32 page_idx;
  1881. int err;
  1882. dm = kzalloc(sizeof(*dm), GFP_KERNEL);
  1883. if (!dm)
  1884. return ERR_PTR(-ENOMEM);
  1885. mlx5_ib_dbg(to_mdev(ibdev), "alloc_memic req: user_length=0x%llx act_length=0x%llx log_alignment=%d\n",
  1886. attr->length, act_size, attr->alignment);
  1887. err = mlx5_cmd_alloc_memic(memic, &memic_addr,
  1888. act_size, attr->alignment);
  1889. if (err)
  1890. goto err_free;
  1891. start_offset = memic_addr & ~PAGE_MASK;
  1892. page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) -
  1893. MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
  1894. PAGE_SHIFT;
  1895. err = uverbs_copy_to(attrs,
  1896. MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
  1897. &start_offset, sizeof(start_offset));
  1898. if (err)
  1899. goto err_dealloc;
  1900. err = uverbs_copy_to(attrs,
  1901. MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
  1902. &page_idx, sizeof(page_idx));
  1903. if (err)
  1904. goto err_dealloc;
  1905. bitmap_set(to_mucontext(context)->dm_pages, page_idx,
  1906. DIV_ROUND_UP(act_size, PAGE_SIZE));
  1907. dm->dev_addr = memic_addr;
  1908. return &dm->ibdm;
  1909. err_dealloc:
  1910. mlx5_cmd_dealloc_memic(memic, memic_addr,
  1911. act_size);
  1912. err_free:
  1913. kfree(dm);
  1914. return ERR_PTR(err);
  1915. }
  1916. int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
  1917. {
  1918. struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic;
  1919. struct mlx5_ib_dm *dm = to_mdm(ibdm);
  1920. u64 act_size = roundup(dm->ibdm.length, MLX5_MEMIC_BASE_SIZE);
  1921. u32 page_idx;
  1922. int ret;
  1923. ret = mlx5_cmd_dealloc_memic(memic, dm->dev_addr, act_size);
  1924. if (ret)
  1925. return ret;
  1926. page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) -
  1927. MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
  1928. PAGE_SHIFT;
  1929. bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages,
  1930. page_idx,
  1931. DIV_ROUND_UP(act_size, PAGE_SIZE));
  1932. kfree(dm);
  1933. return 0;
  1934. }
  1935. static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
  1936. struct ib_ucontext *context,
  1937. struct ib_udata *udata)
  1938. {
  1939. struct mlx5_ib_alloc_pd_resp resp;
  1940. struct mlx5_ib_pd *pd;
  1941. int err;
  1942. pd = kmalloc(sizeof(*pd), GFP_KERNEL);
  1943. if (!pd)
  1944. return ERR_PTR(-ENOMEM);
  1945. err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
  1946. if (err) {
  1947. kfree(pd);
  1948. return ERR_PTR(err);
  1949. }
  1950. if (context) {
  1951. resp.pdn = pd->pdn;
  1952. if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
  1953. mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
  1954. kfree(pd);
  1955. return ERR_PTR(-EFAULT);
  1956. }
  1957. }
  1958. return &pd->ibpd;
  1959. }
  1960. static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
  1961. {
  1962. struct mlx5_ib_dev *mdev = to_mdev(pd->device);
  1963. struct mlx5_ib_pd *mpd = to_mpd(pd);
  1964. mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
  1965. kfree(mpd);
  1966. return 0;
  1967. }
  1968. enum {
  1969. MATCH_CRITERIA_ENABLE_OUTER_BIT,
  1970. MATCH_CRITERIA_ENABLE_MISC_BIT,
  1971. MATCH_CRITERIA_ENABLE_INNER_BIT,
  1972. MATCH_CRITERIA_ENABLE_MISC2_BIT
  1973. };
  1974. #define HEADER_IS_ZERO(match_criteria, headers) \
  1975. !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
  1976. 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
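/* Build the match_criteria_enable bitmask by checking which header
 * blocks (outer, misc, inner, misc2) carry a non-zero mask.
 */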
  1977. static u8 get_match_criteria_enable(u32 *match_criteria)
  1978. {
  1979. u8 match_criteria_enable;
  1980. match_criteria_enable =
  1981. (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
  1982. MATCH_CRITERIA_ENABLE_OUTER_BIT;
  1983. match_criteria_enable |=
  1984. (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
  1985. MATCH_CRITERIA_ENABLE_MISC_BIT;
  1986. match_criteria_enable |=
  1987. (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
  1988. MATCH_CRITERIA_ENABLE_INNER_BIT;
  1989. match_criteria_enable |=
  1990. (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
  1991. MATCH_CRITERIA_ENABLE_MISC2_BIT;
  1992. return match_criteria_enable;
  1993. }
  1994. static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
  1995. {
  1996. MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
  1997. MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
  1998. }
  1999. static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
  2000. bool inner)
  2001. {
  2002. if (inner) {
  2003. MLX5_SET(fte_match_set_misc,
  2004. misc_c, inner_ipv6_flow_label, mask);
  2005. MLX5_SET(fte_match_set_misc,
  2006. misc_v, inner_ipv6_flow_label, val);
  2007. } else {
  2008. MLX5_SET(fte_match_set_misc,
  2009. misc_c, outer_ipv6_flow_label, mask);
  2010. MLX5_SET(fte_match_set_misc,
  2011. misc_v, outer_ipv6_flow_label, val);
  2012. }
  2013. }
  2014. static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
  2015. {
  2016. MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
  2017. MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
  2018. MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
  2019. MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
  2020. }
  2021. static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
  2022. {
  2023. if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
  2024. !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
  2025. return -EOPNOTSUPP;
  2026. if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
  2027. !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
  2028. return -EOPNOTSUPP;
  2029. if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
  2030. !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
  2031. return -EOPNOTSUPP;
  2032. if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
  2033. !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
  2034. return -EOPNOTSUPP;
  2035. return 0;
  2036. }
  2037. #define LAST_ETH_FIELD vlan_tag
  2038. #define LAST_IB_FIELD sl
  2039. #define LAST_IPV4_FIELD tos
  2040. #define LAST_IPV6_FIELD traffic_class
  2041. #define LAST_TCP_UDP_FIELD src_port
  2042. #define LAST_TUNNEL_FIELD tunnel_id
  2043. #define LAST_FLOW_TAG_FIELD tag_id
  2044. #define LAST_DROP_FIELD size
  2045. #define LAST_COUNTERS_FIELD counters
  2046. /* Field is the last supported field */
  2047. #define FIELDS_NOT_SUPPORTED(filter, field)\
  2048. memchr_inv((void *)&filter.field +\
  2049. sizeof(filter.field), 0,\
  2050. sizeof(filter) -\
  2051. offsetof(typeof(filter), field) -\
  2052. sizeof(filter.field))
  2053. static int parse_flow_flow_action(const union ib_flow_spec *ib_spec,
  2054. const struct ib_flow_attr *flow_attr,
  2055. struct mlx5_flow_act *action)
  2056. {
  2057. struct mlx5_ib_flow_action *maction = to_mflow_act(ib_spec->action.act);
  2058. switch (maction->ib_action.type) {
  2059. case IB_FLOW_ACTION_ESP:
  2060. /* Currently only AES_GCM keymat is supported by the driver */
  2061. action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
  2062. action->action |= flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS ?
  2063. MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
  2064. MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
  2065. return 0;
  2066. default:
  2067. return -EOPNOTSUPP;
  2068. }
  2069. }
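/* Translate one ib_flow_spec into the mlx5 match criteria/value buffers
 * and flow actions; @prev_type tells whether an MPLS spec follows a UDP
 * or GRE header.
 */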
  2070. static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
  2071. u32 *match_v, const union ib_flow_spec *ib_spec,
  2072. const struct ib_flow_attr *flow_attr,
  2073. struct mlx5_flow_act *action, u32 prev_type)
  2074. {
  2075. void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
  2076. misc_parameters);
  2077. void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
  2078. misc_parameters);
  2079. void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
  2080. misc_parameters_2);
  2081. void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
  2082. misc_parameters_2);
  2083. void *headers_c;
  2084. void *headers_v;
  2085. int match_ipv;
  2086. int ret;
  2087. if (ib_spec->type & IB_FLOW_SPEC_INNER) {
  2088. headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
  2089. inner_headers);
  2090. headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
  2091. inner_headers);
  2092. match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
  2093. ft_field_support.inner_ip_version);
  2094. } else {
  2095. headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
  2096. outer_headers);
  2097. headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
  2098. outer_headers);
  2099. match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
  2100. ft_field_support.outer_ip_version);
  2101. }
  2102. switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
  2103. case IB_FLOW_SPEC_ETH:
  2104. if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
  2105. return -EOPNOTSUPP;
  2106. ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  2107. dmac_47_16),
  2108. ib_spec->eth.mask.dst_mac);
  2109. ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  2110. dmac_47_16),
  2111. ib_spec->eth.val.dst_mac);
  2112. ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  2113. smac_47_16),
  2114. ib_spec->eth.mask.src_mac);
  2115. ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  2116. smac_47_16),
  2117. ib_spec->eth.val.src_mac);
  2118. if (ib_spec->eth.mask.vlan_tag) {
  2119. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  2120. cvlan_tag, 1);
  2121. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  2122. cvlan_tag, 1);
  2123. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  2124. first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
  2125. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  2126. first_vid, ntohs(ib_spec->eth.val.vlan_tag));
  2127. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  2128. first_cfi,
  2129. ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
  2130. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  2131. first_cfi,
  2132. ntohs(ib_spec->eth.val.vlan_tag) >> 12);
  2133. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  2134. first_prio,
  2135. ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
  2136. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  2137. first_prio,
  2138. ntohs(ib_spec->eth.val.vlan_tag) >> 13);
  2139. }
  2140. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  2141. ethertype, ntohs(ib_spec->eth.mask.ether_type));
  2142. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  2143. ethertype, ntohs(ib_spec->eth.val.ether_type));
  2144. break;
  2145. case IB_FLOW_SPEC_IPV4:
  2146. if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
  2147. return -EOPNOTSUPP;
  2148. if (match_ipv) {
  2149. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  2150. ip_version, 0xf);
  2151. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  2152. ip_version, MLX5_FS_IPV4_VERSION);
  2153. } else {
  2154. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  2155. ethertype, 0xffff);
  2156. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  2157. ethertype, ETH_P_IP);
  2158. }
  2159. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  2160. src_ipv4_src_ipv6.ipv4_layout.ipv4),
  2161. &ib_spec->ipv4.mask.src_ip,
  2162. sizeof(ib_spec->ipv4.mask.src_ip));
  2163. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  2164. src_ipv4_src_ipv6.ipv4_layout.ipv4),
  2165. &ib_spec->ipv4.val.src_ip,
  2166. sizeof(ib_spec->ipv4.val.src_ip));
  2167. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  2168. dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
  2169. &ib_spec->ipv4.mask.dst_ip,
  2170. sizeof(ib_spec->ipv4.mask.dst_ip));
  2171. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  2172. dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
  2173. &ib_spec->ipv4.val.dst_ip,
  2174. sizeof(ib_spec->ipv4.val.dst_ip));
  2175. set_tos(headers_c, headers_v,
  2176. ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
  2177. set_proto(headers_c, headers_v,
  2178. ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
  2179. break;
  2180. case IB_FLOW_SPEC_IPV6:
  2181. if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
  2182. return -EOPNOTSUPP;
  2183. if (match_ipv) {
  2184. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  2185. ip_version, 0xf);
  2186. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  2187. ip_version, MLX5_FS_IPV6_VERSION);
  2188. } else {
  2189. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  2190. ethertype, 0xffff);
  2191. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  2192. ethertype, ETH_P_IPV6);
  2193. }
  2194. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  2195. src_ipv4_src_ipv6.ipv6_layout.ipv6),
  2196. &ib_spec->ipv6.mask.src_ip,
  2197. sizeof(ib_spec->ipv6.mask.src_ip));
  2198. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  2199. src_ipv4_src_ipv6.ipv6_layout.ipv6),
  2200. &ib_spec->ipv6.val.src_ip,
  2201. sizeof(ib_spec->ipv6.val.src_ip));
  2202. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  2203. dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
  2204. &ib_spec->ipv6.mask.dst_ip,
  2205. sizeof(ib_spec->ipv6.mask.dst_ip));
  2206. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  2207. dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
  2208. &ib_spec->ipv6.val.dst_ip,
  2209. sizeof(ib_spec->ipv6.val.dst_ip));
  2210. set_tos(headers_c, headers_v,
  2211. ib_spec->ipv6.mask.traffic_class,
  2212. ib_spec->ipv6.val.traffic_class);
  2213. set_proto(headers_c, headers_v,
  2214. ib_spec->ipv6.mask.next_hdr,
  2215. ib_spec->ipv6.val.next_hdr);
  2216. set_flow_label(misc_params_c, misc_params_v,
  2217. ntohl(ib_spec->ipv6.mask.flow_label),
  2218. ntohl(ib_spec->ipv6.val.flow_label),
  2219. ib_spec->type & IB_FLOW_SPEC_INNER);
  2220. break;
  2221. case IB_FLOW_SPEC_ESP:
  2222. if (ib_spec->esp.mask.seq)
  2223. return -EOPNOTSUPP;
  2224. MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
  2225. ntohl(ib_spec->esp.mask.spi));
  2226. MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
  2227. ntohl(ib_spec->esp.val.spi));
  2228. break;
  2229. case IB_FLOW_SPEC_TCP:
  2230. if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
  2231. LAST_TCP_UDP_FIELD))
  2232. return -EOPNOTSUPP;
  2233. MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
  2234. 0xff);
  2235. MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
  2236. IPPROTO_TCP);
  2237. MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
  2238. ntohs(ib_spec->tcp_udp.mask.src_port));
  2239. MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
  2240. ntohs(ib_spec->tcp_udp.val.src_port));
  2241. MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
  2242. ntohs(ib_spec->tcp_udp.mask.dst_port));
  2243. MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
  2244. ntohs(ib_spec->tcp_udp.val.dst_port));
  2245. break;
  2246. case IB_FLOW_SPEC_UDP:
  2247. if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
  2248. LAST_TCP_UDP_FIELD))
  2249. return -EOPNOTSUPP;
  2250. MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
  2251. 0xff);
  2252. MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
  2253. IPPROTO_UDP);
  2254. MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
  2255. ntohs(ib_spec->tcp_udp.mask.src_port));
  2256. MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
  2257. ntohs(ib_spec->tcp_udp.val.src_port));
  2258. MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
  2259. ntohs(ib_spec->tcp_udp.mask.dst_port));
  2260. MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
  2261. ntohs(ib_spec->tcp_udp.val.dst_port));
  2262. break;
  2263. case IB_FLOW_SPEC_GRE:
  2264. if (ib_spec->gre.mask.c_ks_res0_ver)
  2265. return -EOPNOTSUPP;
  2266. MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
  2267. 0xff);
  2268. MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
  2269. IPPROTO_GRE);
  2270. MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
  2271. ntohs(ib_spec->gre.mask.protocol));
  2272. MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
  2273. ntohs(ib_spec->gre.val.protocol));
  2274. memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
  2275. gre_key_h),
  2276. &ib_spec->gre.mask.key,
  2277. sizeof(ib_spec->gre.mask.key));
  2278. memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
  2279. gre_key_h),
  2280. &ib_spec->gre.val.key,
  2281. sizeof(ib_spec->gre.val.key));
  2282. break;
  2283. case IB_FLOW_SPEC_MPLS:
  2284. switch (prev_type) {
  2285. case IB_FLOW_SPEC_UDP:
  2286. if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
  2287. ft_field_support.outer_first_mpls_over_udp),
  2288. &ib_spec->mpls.mask.tag))
  2289. return -EOPNOTSUPP;
  2290. memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
  2291. outer_first_mpls_over_udp),
  2292. &ib_spec->mpls.val.tag,
  2293. sizeof(ib_spec->mpls.val.tag));
  2294. memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
  2295. outer_first_mpls_over_udp),
  2296. &ib_spec->mpls.mask.tag,
  2297. sizeof(ib_spec->mpls.mask.tag));
  2298. break;
  2299. case IB_FLOW_SPEC_GRE:
  2300. if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
  2301. ft_field_support.outer_first_mpls_over_gre),
  2302. &ib_spec->mpls.mask.tag))
  2303. return -EOPNOTSUPP;
  2304. memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
  2305. outer_first_mpls_over_gre),
  2306. &ib_spec->mpls.val.tag,
  2307. sizeof(ib_spec->mpls.val.tag));
  2308. memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
  2309. outer_first_mpls_over_gre),
  2310. &ib_spec->mpls.mask.tag,
  2311. sizeof(ib_spec->mpls.mask.tag));
  2312. break;
  2313. default:
  2314. if (ib_spec->type & IB_FLOW_SPEC_INNER) {
  2315. if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
  2316. ft_field_support.inner_first_mpls),
  2317. &ib_spec->mpls.mask.tag))
  2318. return -EOPNOTSUPP;
  2319. memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
  2320. inner_first_mpls),
  2321. &ib_spec->mpls.val.tag,
  2322. sizeof(ib_spec->mpls.val.tag));
  2323. memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
  2324. inner_first_mpls),
  2325. &ib_spec->mpls.mask.tag,
  2326. sizeof(ib_spec->mpls.mask.tag));
  2327. } else {
  2328. if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
  2329. ft_field_support.outer_first_mpls),
  2330. &ib_spec->mpls.mask.tag))
  2331. return -EOPNOTSUPP;
  2332. memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
  2333. outer_first_mpls),
  2334. &ib_spec->mpls.val.tag,
  2335. sizeof(ib_spec->mpls.val.tag));
  2336. memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
  2337. outer_first_mpls),
  2338. &ib_spec->mpls.mask.tag,
  2339. sizeof(ib_spec->mpls.mask.tag));
  2340. }
  2341. }
  2342. break;
  2343. case IB_FLOW_SPEC_VXLAN_TUNNEL:
  2344. if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
  2345. LAST_TUNNEL_FIELD))
  2346. return -EOPNOTSUPP;
  2347. MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
  2348. ntohl(ib_spec->tunnel.mask.tunnel_id));
  2349. MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
  2350. ntohl(ib_spec->tunnel.val.tunnel_id));
  2351. break;
  2352. case IB_FLOW_SPEC_ACTION_TAG:
  2353. if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
  2354. LAST_FLOW_TAG_FIELD))
  2355. return -EOPNOTSUPP;
  2356. if (ib_spec->flow_tag.tag_id >= BIT(24))
  2357. return -EINVAL;
  2358. action->flow_tag = ib_spec->flow_tag.tag_id;
  2359. action->has_flow_tag = true;
  2360. break;
  2361. case IB_FLOW_SPEC_ACTION_DROP:
  2362. if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
  2363. LAST_DROP_FIELD))
  2364. return -EOPNOTSUPP;
  2365. action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
  2366. break;
  2367. case IB_FLOW_SPEC_ACTION_HANDLE:
  2368. ret = parse_flow_flow_action(ib_spec, flow_attr, action);
  2369. if (ret)
  2370. return ret;
  2371. break;
  2372. case IB_FLOW_SPEC_ACTION_COUNT:
  2373. if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
  2374. LAST_COUNTERS_FIELD))
  2375. return -EOPNOTSUPP;
  2376. /* for now support only one counters spec per flow */
  2377. if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
  2378. return -EINVAL;
  2379. action->counters = ib_spec->flow_count.counters;
  2380. action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
  2381. break;
  2382. default:
  2383. return -EINVAL;
  2384. }
  2385. return 0;
  2386. }
/* If a flow could catch both multicast and unicast packets,
 * it won't fall into the multicast flow steering table, and such a rule
 * could steal other multicast packets.
 */
  2391. static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
  2392. {
  2393. union ib_flow_spec *flow_spec;
  2394. if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
  2395. ib_attr->num_of_specs < 1)
  2396. return false;
  2397. flow_spec = (union ib_flow_spec *)(ib_attr + 1);
  2398. if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
  2399. struct ib_flow_spec_ipv4 *ipv4_spec;
  2400. ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
  2401. if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
  2402. return true;
  2403. return false;
  2404. }
  2405. if (flow_spec->type == IB_FLOW_SPEC_ETH) {
  2406. struct ib_flow_spec_eth *eth_spec;
  2407. eth_spec = (struct ib_flow_spec_eth *)flow_spec;
  2408. return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
  2409. is_multicast_ether_addr(eth_spec->val.dst_mac);
  2410. }
  2411. return false;
  2412. }
  2413. enum valid_spec {
  2414. VALID_SPEC_INVALID,
  2415. VALID_SPEC_VALID,
  2416. VALID_SPEC_NA,
  2417. };
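/*
 * Classify a spec/action pair for ESP AES-GCM (IPsec) offload: rules that
 * carry an encrypt/decrypt action must also match on IPsec fields, and on
 * egress must not be combined with a drop action or a flow tag; non-crypto
 * rules are rejected on egress and ignored (VALID_SPEC_NA) elsewhere.
 */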
  2418. static enum valid_spec
  2419. is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
  2420. const struct mlx5_flow_spec *spec,
  2421. const struct mlx5_flow_act *flow_act,
  2422. bool egress)
  2423. {
  2424. const u32 *match_c = spec->match_criteria;
  2425. bool is_crypto =
  2426. (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
  2427. MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
  2428. bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
  2429. bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
/*
 * Currently only crypto rules are supported in egress; once regular
 * egress rules are supported, this should always return VALID_SPEC_NA.
 */
  2434. if (!is_crypto)
  2435. return egress ? VALID_SPEC_INVALID : VALID_SPEC_NA;
  2436. return is_crypto && is_ipsec &&
  2437. (!egress || (!is_drop && !flow_act->has_flow_tag)) ?
  2438. VALID_SPEC_VALID : VALID_SPEC_INVALID;
  2439. }
  2440. static bool is_valid_spec(struct mlx5_core_dev *mdev,
  2441. const struct mlx5_flow_spec *spec,
  2442. const struct mlx5_flow_act *flow_act,
  2443. bool egress)
  2444. {
/* We currently only support IPsec egress flows */
  2446. return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
  2447. }
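/*
 * An explicit L2 ethertype match must agree with any L3 spec in the same
 * flow: ETH_P_IP with an IPv4 spec, ETH_P_IPV6 with an IPv6 spec, or an
 * MPLS ethertype when the device can also match on the IP version field.
 */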
  2448. static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
  2449. const struct ib_flow_attr *flow_attr,
  2450. bool check_inner)
  2451. {
  2452. union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
  2453. int match_ipv = check_inner ?
  2454. MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
  2455. ft_field_support.inner_ip_version) :
  2456. MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
  2457. ft_field_support.outer_ip_version);
  2458. int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
  2459. bool ipv4_spec_valid, ipv6_spec_valid;
  2460. unsigned int ip_spec_type = 0;
  2461. bool has_ethertype = false;
  2462. unsigned int spec_index;
  2463. bool mask_valid = true;
  2464. u16 eth_type = 0;
  2465. bool type_valid;
  2466. /* Validate that ethertype is correct */
  2467. for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
  2468. if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
  2469. ib_spec->eth.mask.ether_type) {
  2470. mask_valid = (ib_spec->eth.mask.ether_type ==
  2471. htons(0xffff));
  2472. has_ethertype = true;
  2473. eth_type = ntohs(ib_spec->eth.val.ether_type);
  2474. } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
  2475. (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
  2476. ip_spec_type = ib_spec->type;
  2477. }
  2478. ib_spec = (void *)ib_spec + ib_spec->size;
  2479. }
  2480. type_valid = (!has_ethertype) || (!ip_spec_type);
  2481. if (!type_valid && mask_valid) {
  2482. ipv4_spec_valid = (eth_type == ETH_P_IP) &&
  2483. (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
  2484. ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
  2485. (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
  2486. type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
  2487. (((eth_type == ETH_P_MPLS_UC) ||
  2488. (eth_type == ETH_P_MPLS_MC)) && match_ipv);
  2489. }
  2490. return type_valid;
  2491. }
  2492. static bool is_valid_attr(struct mlx5_core_dev *mdev,
  2493. const struct ib_flow_attr *flow_attr)
  2494. {
  2495. return is_valid_ethertype(mdev, flow_attr, false) &&
  2496. is_valid_ethertype(mdev, flow_attr, true);
  2497. }
  2498. static void put_flow_table(struct mlx5_ib_dev *dev,
  2499. struct mlx5_ib_flow_prio *prio, bool ft_added)
  2500. {
  2501. prio->refcount -= !!ft_added;
  2502. if (!prio->refcount) {
  2503. mlx5_destroy_flow_table(prio->flow_table);
  2504. prio->flow_table = NULL;
  2505. }
  2506. }
  2507. static void counters_clear_description(struct ib_counters *counters)
  2508. {
  2509. struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
  2510. mutex_lock(&mcounters->mcntrs_mutex);
  2511. kfree(mcounters->counters_data);
  2512. mcounters->counters_data = NULL;
  2513. mcounters->cntrs_max_index = 0;
  2514. mutex_unlock(&mcounters->mcntrs_mutex);
  2515. }
  2516. static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
  2517. {
  2518. struct mlx5_ib_flow_handler *handler = container_of(flow_id,
  2519. struct mlx5_ib_flow_handler,
  2520. ibflow);
  2521. struct mlx5_ib_flow_handler *iter, *tmp;
  2522. struct mlx5_ib_dev *dev = handler->dev;
  2523. mutex_lock(&dev->flow_db->lock);
  2524. list_for_each_entry_safe(iter, tmp, &handler->list, list) {
  2525. mlx5_del_flow_rules(iter->rule);
  2526. put_flow_table(dev, iter->prio, true);
  2527. list_del(&iter->list);
  2528. kfree(iter);
  2529. }
  2530. mlx5_del_flow_rules(handler->rule);
  2531. put_flow_table(dev, handler->prio, true);
  2532. if (handler->ibcounters &&
  2533. atomic_read(&handler->ibcounters->usecnt) == 1)
  2534. counters_clear_description(handler->ibcounters);
  2535. mutex_unlock(&dev->flow_db->lock);
  2536. if (handler->flow_matcher)
  2537. atomic_dec(&handler->flow_matcher->usecnt);
  2538. kfree(handler);
  2539. return 0;
  2540. }
  2541. static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
  2542. {
  2543. priority *= 2;
  2544. if (!dont_trap)
  2545. priority++;
  2546. return priority;
  2547. }
  2548. enum flow_table_type {
  2549. MLX5_IB_FT_RX,
  2550. MLX5_IB_FT_TX
  2551. };
  2552. #define MLX5_FS_MAX_TYPES 6
  2553. #define MLX5_FS_MAX_ENTRIES BIT(16)
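/*
 * Lazily create the auto-grouped flow table that backs a priority slot and
 * cache it in the prio entry; callers that attach rules take the refcount.
 */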
  2554. static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
  2555. struct mlx5_ib_flow_prio *prio,
  2556. int priority,
  2557. int num_entries, int num_groups)
  2558. {
  2559. struct mlx5_flow_table *ft;
  2560. ft = mlx5_create_auto_grouped_flow_table(ns, priority,
  2561. num_entries,
  2562. num_groups,
  2563. 0, 0);
  2564. if (IS_ERR(ft))
  2565. return ERR_CAST(ft);
  2566. prio->flow_table = ft;
  2567. prio->refcount = 0;
  2568. return prio;
  2569. }
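/*
 * Map an ib_flow_attr onto a steering namespace, priority slot and table
 * size: NORMAL flows use the bypass (or egress) namespace, default/leftover
 * flows the leftovers namespace, and sniffer flows the per-direction
 * sniffer namespaces.
 */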
  2570. static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
  2571. struct ib_flow_attr *flow_attr,
  2572. enum flow_table_type ft_type)
  2573. {
  2574. bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
  2575. struct mlx5_flow_namespace *ns = NULL;
  2576. struct mlx5_ib_flow_prio *prio;
  2577. struct mlx5_flow_table *ft;
  2578. int max_table_size;
  2579. int num_entries;
  2580. int num_groups;
  2581. int priority;
  2582. max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
  2583. log_max_ft_size));
  2584. if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
  2585. if (ft_type == MLX5_IB_FT_TX)
  2586. priority = 0;
  2587. else if (flow_is_multicast_only(flow_attr) &&
  2588. !dont_trap)
  2589. priority = MLX5_IB_FLOW_MCAST_PRIO;
  2590. else
  2591. priority = ib_prio_to_core_prio(flow_attr->priority,
  2592. dont_trap);
  2593. ns = mlx5_get_flow_namespace(dev->mdev,
  2594. ft_type == MLX5_IB_FT_TX ?
  2595. MLX5_FLOW_NAMESPACE_EGRESS :
  2596. MLX5_FLOW_NAMESPACE_BYPASS);
  2597. num_entries = MLX5_FS_MAX_ENTRIES;
  2598. num_groups = MLX5_FS_MAX_TYPES;
  2599. prio = &dev->flow_db->prios[priority];
  2600. } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
  2601. flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
  2602. ns = mlx5_get_flow_namespace(dev->mdev,
  2603. MLX5_FLOW_NAMESPACE_LEFTOVERS);
  2604. build_leftovers_ft_param(&priority,
  2605. &num_entries,
  2606. &num_groups);
  2607. prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
  2608. } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
  2609. if (!MLX5_CAP_FLOWTABLE(dev->mdev,
  2610. allow_sniffer_and_nic_rx_shared_tir))
  2611. return ERR_PTR(-ENOTSUPP);
  2612. ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
  2613. MLX5_FLOW_NAMESPACE_SNIFFER_RX :
  2614. MLX5_FLOW_NAMESPACE_SNIFFER_TX);
  2615. prio = &dev->flow_db->sniffer[ft_type];
  2616. priority = 0;
  2617. num_entries = 1;
  2618. num_groups = 1;
  2619. }
  2620. if (!ns)
  2621. return ERR_PTR(-ENOTSUPP);
  2622. if (num_entries > max_table_size)
  2623. return ERR_PTR(-ENOMEM);
  2624. ft = prio->flow_table;
  2625. if (!ft)
  2626. return _get_prio(ns, prio, priority, num_entries, num_groups);
  2627. return prio;
  2628. }
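/*
 * For flows installed on behalf of an underlay QP, also match on the BTH
 * destination QP number (when the device supports it) so the rule only
 * catches traffic targeting that underlay QPN.
 */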
  2629. static void set_underlay_qp(struct mlx5_ib_dev *dev,
  2630. struct mlx5_flow_spec *spec,
  2631. u32 underlay_qpn)
  2632. {
  2633. void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
  2634. spec->match_criteria,
  2635. misc_parameters);
  2636. void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
  2637. misc_parameters);
  2638. if (underlay_qpn &&
  2639. MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
  2640. ft_field_support.bth_dst_qp)) {
  2641. MLX5_SET(fte_match_set_misc,
  2642. misc_params_v, bth_dst_qp, underlay_qpn);
  2643. MLX5_SET(fte_match_set_misc,
  2644. misc_params_c, bth_dst_qp, 0xffffff);
  2645. }
  2646. }
  2647. static int read_flow_counters(struct ib_device *ibdev,
  2648. struct mlx5_read_counters_attr *read_attr)
  2649. {
  2650. struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
  2651. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  2652. return mlx5_fc_query(dev->mdev, fc,
  2653. &read_attr->out[IB_COUNTER_PACKETS],
  2654. &read_attr->out[IB_COUNTER_BYTES]);
  2655. }
/* flow counters currently expose two counters: packets and bytes */
  2657. #define FLOW_COUNTERS_NUM 2
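/*
 * Bind the user-supplied (description, index) pairs to a counters object
 * and record the highest referenced index + 1 in cntrs_max_index, which
 * also marks the object as bound.
 */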
  2658. static int counters_set_description(struct ib_counters *counters,
  2659. enum mlx5_ib_counters_type counters_type,
  2660. struct mlx5_ib_flow_counters_desc *desc_data,
  2661. u32 ncounters)
  2662. {
  2663. struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
  2664. u32 cntrs_max_index = 0;
  2665. int i;
  2666. if (counters_type != MLX5_IB_COUNTERS_FLOW)
  2667. return -EINVAL;
  2668. /* init the fields for the object */
  2669. mcounters->type = counters_type;
  2670. mcounters->read_counters = read_flow_counters;
  2671. mcounters->counters_num = FLOW_COUNTERS_NUM;
  2672. mcounters->ncounters = ncounters;
/* each counter entry has both a description and an index */
  2674. for (i = 0; i < ncounters; i++) {
  2675. if (desc_data[i].description > IB_COUNTER_BYTES)
  2676. return -EINVAL;
  2677. if (cntrs_max_index <= desc_data[i].index)
  2678. cntrs_max_index = desc_data[i].index + 1;
  2679. }
  2680. mutex_lock(&mcounters->mcntrs_mutex);
  2681. mcounters->counters_data = desc_data;
  2682. mcounters->cntrs_max_index = cntrs_max_index;
  2683. mutex_unlock(&mcounters->mcntrs_mutex);
  2684. return 0;
  2685. }
  2686. #define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
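/*
 * Attach hardware flow counters to an ib_counters object: copy the optional
 * description array from user space, create the mlx5_fc handle on first
 * use, and reject both a second description for an already-bound object and
 * a first bind that arrives without one.
 */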
  2687. static int flow_counters_set_data(struct ib_counters *ibcounters,
  2688. struct mlx5_ib_create_flow *ucmd)
  2689. {
  2690. struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
  2691. struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
  2692. struct mlx5_ib_flow_counters_desc *desc_data = NULL;
  2693. bool hw_hndl = false;
  2694. int ret = 0;
  2695. if (ucmd && ucmd->ncounters_data != 0) {
  2696. cntrs_data = ucmd->data;
  2697. if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
  2698. return -EINVAL;
  2699. desc_data = kcalloc(cntrs_data->ncounters,
  2700. sizeof(*desc_data),
  2701. GFP_KERNEL);
  2702. if (!desc_data)
  2703. return -ENOMEM;
  2704. if (copy_from_user(desc_data,
  2705. u64_to_user_ptr(cntrs_data->counters_data),
  2706. sizeof(*desc_data) * cntrs_data->ncounters)) {
  2707. ret = -EFAULT;
  2708. goto free;
  2709. }
  2710. }
  2711. if (!mcounters->hw_cntrs_hndl) {
  2712. mcounters->hw_cntrs_hndl = mlx5_fc_create(
  2713. to_mdev(ibcounters->device)->mdev, false);
  2714. if (!mcounters->hw_cntrs_hndl) {
  2715. ret = -ENOMEM;
  2716. goto free;
  2717. }
  2718. hw_hndl = true;
  2719. }
  2720. if (desc_data) {
  2721. /* counters already bound to at least one flow */
  2722. if (mcounters->cntrs_max_index) {
  2723. ret = -EINVAL;
  2724. goto free_hndl;
  2725. }
  2726. ret = counters_set_description(ibcounters,
  2727. MLX5_IB_COUNTERS_FLOW,
  2728. desc_data,
  2729. cntrs_data->ncounters);
  2730. if (ret)
  2731. goto free_hndl;
  2732. } else if (!mcounters->cntrs_max_index) {
  2733. /* counters not bound yet, must have udata passed */
  2734. ret = -EINVAL;
  2735. goto free_hndl;
  2736. }
  2737. return 0;
  2738. free_hndl:
  2739. if (hw_hndl) {
  2740. mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
  2741. mcounters->hw_cntrs_hndl);
  2742. mcounters->hw_cntrs_hndl = NULL;
  2743. }
  2744. free:
  2745. kfree(desc_data);
  2746. return ret;
  2747. }
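/*
 * Core rule-creation path: translate the ib_flow_attr spec list into an
 * mlx5 match spec, build the destination list (TIR, counter and/or drop)
 * and install the rule in the priority's flow table.
 */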
  2748. static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
  2749. struct mlx5_ib_flow_prio *ft_prio,
  2750. const struct ib_flow_attr *flow_attr,
  2751. struct mlx5_flow_destination *dst,
  2752. u32 underlay_qpn,
  2753. struct mlx5_ib_create_flow *ucmd)
  2754. {
  2755. struct mlx5_flow_table *ft = ft_prio->flow_table;
  2756. struct mlx5_ib_flow_handler *handler;
  2757. struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
  2758. struct mlx5_flow_spec *spec;
  2759. struct mlx5_flow_destination dest_arr[2] = {};
  2760. struct mlx5_flow_destination *rule_dst = dest_arr;
  2761. const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
  2762. unsigned int spec_index;
  2763. u32 prev_type = 0;
  2764. int err = 0;
  2765. int dest_num = 0;
  2766. bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
  2767. if (!is_valid_attr(dev->mdev, flow_attr))
  2768. return ERR_PTR(-EINVAL);
  2769. spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
  2770. handler = kzalloc(sizeof(*handler), GFP_KERNEL);
  2771. if (!handler || !spec) {
  2772. err = -ENOMEM;
  2773. goto free;
  2774. }
  2775. INIT_LIST_HEAD(&handler->list);
  2776. if (dst) {
  2777. memcpy(&dest_arr[0], dst, sizeof(*dst));
  2778. dest_num++;
  2779. }
  2780. for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
  2781. err = parse_flow_attr(dev->mdev, spec->match_criteria,
  2782. spec->match_value,
  2783. ib_flow, flow_attr, &flow_act,
  2784. prev_type);
  2785. if (err < 0)
  2786. goto free;
  2787. prev_type = ((union ib_flow_spec *)ib_flow)->type;
  2788. ib_flow += ((union ib_flow_spec *)ib_flow)->size;
  2789. }
  2790. if (!flow_is_multicast_only(flow_attr))
  2791. set_underlay_qp(dev, spec, underlay_qpn);
  2792. if (dev->rep) {
  2793. void *misc;
  2794. misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
  2795. misc_parameters);
  2796. MLX5_SET(fte_match_set_misc, misc, source_port,
  2797. dev->rep->vport);
  2798. misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
  2799. misc_parameters);
  2800. MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
  2801. }
  2802. spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
  2803. if (is_egress &&
  2804. !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
  2805. err = -EINVAL;
  2806. goto free;
  2807. }
  2808. if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
  2809. err = flow_counters_set_data(flow_act.counters, ucmd);
  2810. if (err)
  2811. goto free;
  2812. handler->ibcounters = flow_act.counters;
  2813. dest_arr[dest_num].type =
  2814. MLX5_FLOW_DESTINATION_TYPE_COUNTER;
  2815. dest_arr[dest_num].counter =
  2816. to_mcounters(flow_act.counters)->hw_cntrs_hndl;
  2817. dest_num++;
  2818. }
  2819. if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
  2820. if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
  2821. rule_dst = NULL;
  2822. dest_num = 0;
  2823. }
  2824. } else {
  2825. if (is_egress)
  2826. flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
  2827. else
  2828. flow_act.action |=
  2829. dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
  2830. MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
  2831. }
  2832. if (flow_act.has_flow_tag &&
  2833. (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
  2834. flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
  2835. mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
  2836. flow_act.flow_tag, flow_attr->type);
  2837. err = -EINVAL;
  2838. goto free;
  2839. }
  2840. handler->rule = mlx5_add_flow_rules(ft, spec,
  2841. &flow_act,
  2842. rule_dst, dest_num);
  2843. if (IS_ERR(handler->rule)) {
  2844. err = PTR_ERR(handler->rule);
  2845. goto free;
  2846. }
  2847. ft_prio->refcount++;
  2848. handler->prio = ft_prio;
  2849. handler->dev = dev;
  2850. ft_prio->flow_table = ft;
  2851. free:
  2852. if (err && handler) {
  2853. if (handler->ibcounters &&
  2854. atomic_read(&handler->ibcounters->usecnt) == 1)
  2855. counters_clear_description(handler->ibcounters);
  2856. kfree(handler);
  2857. }
  2858. kvfree(spec);
  2859. return err ? ERR_PTR(err) : handler;
  2860. }
  2861. static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
  2862. struct mlx5_ib_flow_prio *ft_prio,
  2863. const struct ib_flow_attr *flow_attr,
  2864. struct mlx5_flow_destination *dst)
  2865. {
  2866. return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
  2867. }
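/*
 * DONT_TRAP flows are installed twice: once with no destination, so the
 * packet continues to the next priority, and once forwarding to the given
 * destination; the second handler is linked to the first so both are torn
 * down together.
 */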
  2868. static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
  2869. struct mlx5_ib_flow_prio *ft_prio,
  2870. struct ib_flow_attr *flow_attr,
  2871. struct mlx5_flow_destination *dst)
  2872. {
  2873. struct mlx5_ib_flow_handler *handler_dst = NULL;
  2874. struct mlx5_ib_flow_handler *handler = NULL;
  2875. handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
  2876. if (!IS_ERR(handler)) {
  2877. handler_dst = create_flow_rule(dev, ft_prio,
  2878. flow_attr, dst);
  2879. if (IS_ERR(handler_dst)) {
  2880. mlx5_del_flow_rules(handler->rule);
  2881. ft_prio->refcount--;
  2882. kfree(handler);
  2883. handler = handler_dst;
  2884. } else {
  2885. list_add(&handler_dst->list, &handler->list);
  2886. }
  2887. }
  2888. return handler;
  2889. }
  2890. enum {
  2891. LEFTOVERS_MC,
  2892. LEFTOVERS_UC,
  2893. };
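/*
 * Leftovers rules catch packets that hit no other entry: one rule for
 * multicast destinations and, for ALL_DEFAULT attributes, a linked second
 * rule for unicast.
 */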
  2894. static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
  2895. struct mlx5_ib_flow_prio *ft_prio,
  2896. struct ib_flow_attr *flow_attr,
  2897. struct mlx5_flow_destination *dst)
  2898. {
  2899. struct mlx5_ib_flow_handler *handler_ucast = NULL;
  2900. struct mlx5_ib_flow_handler *handler = NULL;
  2901. static struct {
  2902. struct ib_flow_attr flow_attr;
  2903. struct ib_flow_spec_eth eth_flow;
  2904. } leftovers_specs[] = {
  2905. [LEFTOVERS_MC] = {
  2906. .flow_attr = {
  2907. .num_of_specs = 1,
  2908. .size = sizeof(leftovers_specs[0])
  2909. },
  2910. .eth_flow = {
  2911. .type = IB_FLOW_SPEC_ETH,
  2912. .size = sizeof(struct ib_flow_spec_eth),
  2913. .mask = {.dst_mac = {0x1} },
  2914. .val = {.dst_mac = {0x1} }
  2915. }
  2916. },
  2917. [LEFTOVERS_UC] = {
  2918. .flow_attr = {
  2919. .num_of_specs = 1,
  2920. .size = sizeof(leftovers_specs[0])
  2921. },
  2922. .eth_flow = {
  2923. .type = IB_FLOW_SPEC_ETH,
  2924. .size = sizeof(struct ib_flow_spec_eth),
  2925. .mask = {.dst_mac = {0x1} },
  2926. .val = {.dst_mac = {} }
  2927. }
  2928. }
  2929. };
  2930. handler = create_flow_rule(dev, ft_prio,
  2931. &leftovers_specs[LEFTOVERS_MC].flow_attr,
  2932. dst);
  2933. if (!IS_ERR(handler) &&
  2934. flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
  2935. handler_ucast = create_flow_rule(dev, ft_prio,
  2936. &leftovers_specs[LEFTOVERS_UC].flow_attr,
  2937. dst);
  2938. if (IS_ERR(handler_ucast)) {
  2939. mlx5_del_flow_rules(handler->rule);
  2940. ft_prio->refcount--;
  2941. kfree(handler);
  2942. handler = handler_ucast;
  2943. } else {
  2944. list_add(&handler_ucast->list, &handler->list);
  2945. }
  2946. }
  2947. return handler;
  2948. }
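/*
 * Sniffer flows mirror all traffic: one catch-all rule is installed in the
 * RX sniffer table and one in the TX sniffer table, with the TX handler
 * chained to the RX one.
 */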
  2949. static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
  2950. struct mlx5_ib_flow_prio *ft_rx,
  2951. struct mlx5_ib_flow_prio *ft_tx,
  2952. struct mlx5_flow_destination *dst)
  2953. {
  2954. struct mlx5_ib_flow_handler *handler_rx;
  2955. struct mlx5_ib_flow_handler *handler_tx;
  2956. int err;
  2957. static const struct ib_flow_attr flow_attr = {
  2958. .num_of_specs = 0,
  2959. .size = sizeof(flow_attr)
  2960. };
  2961. handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
  2962. if (IS_ERR(handler_rx)) {
  2963. err = PTR_ERR(handler_rx);
  2964. goto err;
  2965. }
  2966. handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
  2967. if (IS_ERR(handler_tx)) {
  2968. err = PTR_ERR(handler_tx);
  2969. goto err_tx;
  2970. }
  2971. list_add(&handler_tx->list, &handler_rx->list);
  2972. return handler_rx;
  2973. err_tx:
  2974. mlx5_del_flow_rules(handler_rx->rule);
  2975. ft_rx->refcount--;
  2976. kfree(handler_rx);
  2977. err:
  2978. return ERR_PTR(err);
  2979. }
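/*
 * ib_create_flow entry point: validate the user command and attributes,
 * resolve the flow table for the attribute type and dispatch to the
 * normal, don't-trap, leftovers or sniffer rule builders.
 */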
  2980. static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
  2981. struct ib_flow_attr *flow_attr,
  2982. int domain,
  2983. struct ib_udata *udata)
  2984. {
  2985. struct mlx5_ib_dev *dev = to_mdev(qp->device);
  2986. struct mlx5_ib_qp *mqp = to_mqp(qp);
  2987. struct mlx5_ib_flow_handler *handler = NULL;
  2988. struct mlx5_flow_destination *dst = NULL;
  2989. struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
  2990. struct mlx5_ib_flow_prio *ft_prio;
  2991. bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
  2992. struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
  2993. size_t min_ucmd_sz, required_ucmd_sz;
  2994. int err;
  2995. int underlay_qpn;
  2996. if (udata && udata->inlen) {
  2997. min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
  2998. sizeof(ucmd_hdr.reserved);
  2999. if (udata->inlen < min_ucmd_sz)
  3000. return ERR_PTR(-EOPNOTSUPP);
  3001. err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
  3002. if (err)
  3003. return ERR_PTR(err);
/* currently only a single counters data block is supported */
  3005. if (ucmd_hdr.ncounters_data > 1)
  3006. return ERR_PTR(-EINVAL);
  3007. required_ucmd_sz = min_ucmd_sz +
  3008. sizeof(struct mlx5_ib_flow_counters_data) *
  3009. ucmd_hdr.ncounters_data;
  3010. if (udata->inlen > required_ucmd_sz &&
  3011. !ib_is_udata_cleared(udata, required_ucmd_sz,
  3012. udata->inlen - required_ucmd_sz))
  3013. return ERR_PTR(-EOPNOTSUPP);
  3014. ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
  3015. if (!ucmd)
  3016. return ERR_PTR(-ENOMEM);
  3017. err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
  3018. if (err) {
  3019. kfree(ucmd);
  3020. return ERR_PTR(err);
  3021. }
  3022. }
/* don't leak ucmd on the early error paths below */
if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
err = -ENOMEM;
goto free_ucmd;
}
if (domain != IB_FLOW_DOMAIN_USER ||
flow_attr->port > dev->num_ports ||
(flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
IB_FLOW_ATTR_FLAGS_EGRESS))) {
err = -EINVAL;
goto free_ucmd;
}
if (is_egress &&
(flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
err = -EINVAL;
goto free_ucmd;
}
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst) {
err = -ENOMEM;
goto free_ucmd;
}
  3037. mutex_lock(&dev->flow_db->lock);
  3038. ft_prio = get_flow_table(dev, flow_attr,
  3039. is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
  3040. if (IS_ERR(ft_prio)) {
  3041. err = PTR_ERR(ft_prio);
  3042. goto unlock;
  3043. }
  3044. if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
  3045. ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
  3046. if (IS_ERR(ft_prio_tx)) {
  3047. err = PTR_ERR(ft_prio_tx);
  3048. ft_prio_tx = NULL;
  3049. goto destroy_ft;
  3050. }
  3051. }
  3052. if (is_egress) {
  3053. dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
  3054. } else {
  3055. dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
  3056. if (mqp->flags & MLX5_IB_QP_RSS)
  3057. dst->tir_num = mqp->rss_qp.tirn;
  3058. else
  3059. dst->tir_num = mqp->raw_packet_qp.rq.tirn;
  3060. }
  3061. if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
  3062. if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
  3063. handler = create_dont_trap_rule(dev, ft_prio,
  3064. flow_attr, dst);
  3065. } else {
  3066. underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
  3067. mqp->underlay_qpn : 0;
  3068. handler = _create_flow_rule(dev, ft_prio, flow_attr,
  3069. dst, underlay_qpn, ucmd);
  3070. }
  3071. } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
  3072. flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
  3073. handler = create_leftovers_rule(dev, ft_prio, flow_attr,
  3074. dst);
  3075. } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
  3076. handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
  3077. } else {
  3078. err = -EINVAL;
  3079. goto destroy_ft;
  3080. }
  3081. if (IS_ERR(handler)) {
  3082. err = PTR_ERR(handler);
  3083. handler = NULL;
  3084. goto destroy_ft;
  3085. }
  3086. mutex_unlock(&dev->flow_db->lock);
  3087. kfree(dst);
  3088. kfree(ucmd);
  3089. return &handler->ibflow;
  3090. destroy_ft:
  3091. put_flow_table(dev, ft_prio, false);
  3092. if (ft_prio_tx)
  3093. put_flow_table(dev, ft_prio_tx, false);
unlock:
mutex_unlock(&dev->flow_db->lock);
kfree(dst);
kfree(handler);
free_ucmd:
kfree(ucmd);
return ERR_PTR(err);
  3100. }
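/*
 * Flow-table lookup for the raw rule path below: always the bypass
 * namespace, with the priority promoted to the multicast slot for
 * multicast matches, reusing a cached table when one exists.
 */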
  3101. static struct mlx5_ib_flow_prio *_get_flow_table(struct mlx5_ib_dev *dev,
  3102. int priority, bool mcast)
  3103. {
  3104. int max_table_size;
  3105. struct mlx5_flow_namespace *ns = NULL;
  3106. struct mlx5_ib_flow_prio *prio;
  3107. max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
  3108. log_max_ft_size));
  3109. if (max_table_size < MLX5_FS_MAX_ENTRIES)
  3110. return ERR_PTR(-ENOMEM);
  3111. if (mcast)
  3112. priority = MLX5_IB_FLOW_MCAST_PRIO;
  3113. else
  3114. priority = ib_prio_to_core_prio(priority, false);
  3115. ns = mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS);
  3116. if (!ns)
  3117. return ERR_PTR(-ENOTSUPP);
  3118. prio = &dev->flow_db->prios[priority];
  3119. if (prio->flow_table)
  3120. return prio;
  3121. return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
  3122. MLX5_FS_MAX_TYPES);
  3123. }
  3124. static struct mlx5_ib_flow_handler *
  3125. _create_raw_flow_rule(struct mlx5_ib_dev *dev,
  3126. struct mlx5_ib_flow_prio *ft_prio,
  3127. struct mlx5_flow_destination *dst,
  3128. struct mlx5_ib_flow_matcher *fs_matcher,
  3129. void *cmd_in, int inlen)
  3130. {
  3131. struct mlx5_ib_flow_handler *handler;
  3132. struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
  3133. struct mlx5_flow_spec *spec;
  3134. struct mlx5_flow_table *ft = ft_prio->flow_table;
  3135. int err = 0;
  3136. spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
  3137. handler = kzalloc(sizeof(*handler), GFP_KERNEL);
  3138. if (!handler || !spec) {
  3139. err = -ENOMEM;
  3140. goto free;
  3141. }
  3142. INIT_LIST_HEAD(&handler->list);
  3143. memcpy(spec->match_value, cmd_in, inlen);
  3144. memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
  3145. fs_matcher->mask_len);
  3146. spec->match_criteria_enable = fs_matcher->match_criteria_enable;
  3147. flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
  3148. handler->rule = mlx5_add_flow_rules(ft, spec,
  3149. &flow_act, dst, 1);
  3150. if (IS_ERR(handler->rule)) {
  3151. err = PTR_ERR(handler->rule);
  3152. goto free;
  3153. }
  3154. ft_prio->refcount++;
  3155. handler->prio = ft_prio;
  3156. handler->dev = dev;
  3157. ft_prio->flow_table = ft;
  3158. free:
  3159. if (err)
  3160. kfree(handler);
  3161. kvfree(spec);
  3162. return err ? ERR_PTR(err) : handler;
  3163. }
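/*
 * A raw rule counts as multicast when its outer headers match a multicast
 * destination MAC or a multicast destination IPv4 address in both the
 * value and the mask.
 */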
  3164. static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
  3165. void *match_v)
  3166. {
  3167. void *match_c;
  3168. void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
  3169. void *dmac, *dmac_mask;
  3170. void *ipv4, *ipv4_mask;
  3171. if (!(fs_matcher->match_criteria_enable &
  3172. (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
  3173. return false;
  3174. match_c = fs_matcher->matcher_mask.match_params;
  3175. match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
  3176. outer_headers);
  3177. match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
  3178. outer_headers);
  3179. dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
  3180. dmac_47_16);
  3181. dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
  3182. dmac_47_16);
  3183. if (is_multicast_ether_addr(dmac) &&
  3184. is_multicast_ether_addr(dmac_mask))
  3185. return true;
  3186. ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
  3187. dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
  3188. ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
  3189. dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
  3190. if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
  3191. ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
  3192. return true;
  3193. return false;
  3194. }
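/*
 * Install a rule built from a user-provided matcher and match value,
 * steering to either a TIR or another flow table. A minimal usage sketch,
 * assuming tirn was obtained elsewhere:
 *
 *	handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, cmd_in, inlen,
 *					  tirn, MLX5_FLOW_DESTINATION_TYPE_TIR);
 */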
  3195. struct mlx5_ib_flow_handler *
  3196. mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
  3197. struct mlx5_ib_flow_matcher *fs_matcher,
  3198. void *cmd_in, int inlen, int dest_id,
  3199. int dest_type)
  3200. {
  3201. struct mlx5_flow_destination *dst;
  3202. struct mlx5_ib_flow_prio *ft_prio;
  3203. int priority = fs_matcher->priority;
  3204. struct mlx5_ib_flow_handler *handler;
  3205. bool mcast;
  3206. int err;
  3207. if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
  3208. return ERR_PTR(-EOPNOTSUPP);
  3209. if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
  3210. return ERR_PTR(-ENOMEM);
  3211. dst = kzalloc(sizeof(*dst), GFP_KERNEL);
  3212. if (!dst)
  3213. return ERR_PTR(-ENOMEM);
  3214. mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
  3215. mutex_lock(&dev->flow_db->lock);
  3216. ft_prio = _get_flow_table(dev, priority, mcast);
  3217. if (IS_ERR(ft_prio)) {
  3218. err = PTR_ERR(ft_prio);
  3219. goto unlock;
  3220. }
  3221. if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
  3222. dst->type = dest_type;
  3223. dst->tir_num = dest_id;
  3224. } else {
  3225. dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
  3226. dst->ft_num = dest_id;
  3227. }
  3228. handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, cmd_in,
  3229. inlen);
  3230. if (IS_ERR(handler)) {
  3231. err = PTR_ERR(handler);
  3232. goto destroy_ft;
  3233. }
  3234. mutex_unlock(&dev->flow_db->lock);
  3235. atomic_inc(&fs_matcher->usecnt);
  3236. handler->flow_matcher = fs_matcher;
  3237. kfree(dst);
  3238. return handler;
  3239. destroy_ft:
  3240. put_flow_table(dev, ft_prio, false);
  3241. unlock:
  3242. mutex_unlock(&dev->flow_db->lock);
  3243. kfree(dst);
  3244. return ERR_PTR(err);
  3245. }
  3246. static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
  3247. {
  3248. u32 flags = 0;
  3249. if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
  3250. flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
  3251. return flags;
  3252. }
  3253. #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
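/*
 * Create an ESP flow action backed by an AES-GCM accel xfrm context; only
 * the attribute subset documented in the comment inside the function is
 * accepted.
 */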
  3254. static struct ib_flow_action *
  3255. mlx5_ib_create_flow_action_esp(struct ib_device *device,
  3256. const struct ib_flow_action_attrs_esp *attr,
  3257. struct uverbs_attr_bundle *attrs)
  3258. {
  3259. struct mlx5_ib_dev *mdev = to_mdev(device);
  3260. struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
  3261. struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
  3262. struct mlx5_ib_flow_action *action;
  3263. u64 action_flags;
  3264. u64 flags;
  3265. int err = 0;
  3266. err = uverbs_get_flags64(
  3267. &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
  3268. ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
  3269. if (err)
  3270. return ERR_PTR(err);
  3271. flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
/* We currently support only a subset of the standard features: a
 * keymat of type AES_GCM with icv_len == 16, iv_algo == SEQ and ESN
 * (with overlap). Full offload mode isn't supported.
 */
  3276. if (!attr->keymat || attr->replay || attr->encap ||
  3277. attr->spi || attr->seq || attr->tfc_pad ||
  3278. attr->hard_limit_pkts ||
  3279. (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
  3280. IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
  3281. return ERR_PTR(-EOPNOTSUPP);
  3282. if (attr->keymat->protocol !=
  3283. IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
  3284. return ERR_PTR(-EOPNOTSUPP);
  3285. aes_gcm = &attr->keymat->keymat.aes_gcm;
  3286. if (aes_gcm->icv_len != 16 ||
  3287. aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
  3288. return ERR_PTR(-EOPNOTSUPP);
  3289. action = kmalloc(sizeof(*action), GFP_KERNEL);
  3290. if (!action)
  3291. return ERR_PTR(-ENOMEM);
  3292. action->esp_aes_gcm.ib_flags = attr->flags;
  3293. memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
  3294. sizeof(accel_attrs.keymat.aes_gcm.aes_key));
  3295. accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
  3296. memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
  3297. sizeof(accel_attrs.keymat.aes_gcm.salt));
  3298. memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
  3299. sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
  3300. accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
  3301. accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
  3302. accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
  3303. accel_attrs.esn = attr->esn;
  3304. if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
  3305. accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
  3306. if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
  3307. accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
  3308. if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
  3309. accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
  3310. action->esp_aes_gcm.ctx =
  3311. mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
  3312. if (IS_ERR(action->esp_aes_gcm.ctx)) {
  3313. err = PTR_ERR(action->esp_aes_gcm.ctx);
  3314. goto err_parse;
  3315. }
  3316. action->esp_aes_gcm.ib_flags = attr->flags;
  3317. return &action->ib_action;
  3318. err_parse:
  3319. kfree(action);
  3320. return ERR_PTR(err);
  3321. }
  3322. static int
  3323. mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
  3324. const struct ib_flow_action_attrs_esp *attr,
  3325. struct uverbs_attr_bundle *attrs)
  3326. {
  3327. struct mlx5_ib_flow_action *maction = to_mflow_act(action);
  3328. struct mlx5_accel_esp_xfrm_attrs accel_attrs;
  3329. int err = 0;
  3330. if (attr->keymat || attr->replay || attr->encap ||
  3331. attr->spi || attr->seq || attr->tfc_pad ||
  3332. attr->hard_limit_pkts ||
  3333. (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
  3334. IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
  3335. IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
  3336. return -EOPNOTSUPP;
  3337. /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
  3338. * be modified.
  3339. */
  3340. if (!(maction->esp_aes_gcm.ib_flags &
  3341. IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
  3342. attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
  3343. IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
  3344. return -EINVAL;
  3345. memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
  3346. sizeof(accel_attrs));
  3347. accel_attrs.esn = attr->esn;
  3348. if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
  3349. accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
  3350. else
  3351. accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
  3352. err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
  3353. &accel_attrs);
  3354. if (err)
  3355. return err;
  3356. maction->esp_aes_gcm.ib_flags &=
  3357. ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
  3358. maction->esp_aes_gcm.ib_flags |=
  3359. attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
  3360. return 0;
  3361. }
  3362. static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
  3363. {
  3364. struct mlx5_ib_flow_action *maction = to_mflow_act(action);
  3365. switch (action->type) {
  3366. case IB_FLOW_ACTION_ESP:
/*
 * We only support aes_gcm for now, so we implicitly know this is
 * the underlying crypto.
 */
  3371. mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
  3372. break;
  3373. default:
  3374. WARN_ON(true);
  3375. break;
  3376. }
  3377. kfree(maction);
  3378. return 0;
  3379. }
  3380. static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
  3381. {
  3382. struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
  3383. struct mlx5_ib_qp *mqp = to_mqp(ibqp);
  3384. int err;
  3385. if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
  3387. return -EOPNOTSUPP;
  3388. }
  3389. err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
  3390. if (err)
  3391. mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
  3392. ibqp->qp_num, gid->raw);
  3393. return err;
  3394. }
  3395. static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
  3396. {
  3397. struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
  3398. int err;
  3399. err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
  3400. if (err)
  3401. mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
  3402. ibqp->qp_num, gid->raw);
  3403. return err;
  3404. }
  3405. static int init_node_data(struct mlx5_ib_dev *dev)
  3406. {
  3407. int err;
  3408. err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
  3409. if (err)
  3410. return err;
  3411. dev->mdev->rev_id = dev->mdev->pdev->revision;
  3412. return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
  3413. }
  3414. static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
  3415. char *buf)
  3416. {
  3417. struct mlx5_ib_dev *dev =
  3418. container_of(device, struct mlx5_ib_dev, ib_dev.dev);
  3419. return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
  3420. }
  3421. static ssize_t show_reg_pages(struct device *device,
  3422. struct device_attribute *attr, char *buf)
  3423. {
  3424. struct mlx5_ib_dev *dev =
  3425. container_of(device, struct mlx5_ib_dev, ib_dev.dev);
  3426. return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
  3427. }
  3428. static ssize_t show_hca(struct device *device, struct device_attribute *attr,
  3429. char *buf)
  3430. {
  3431. struct mlx5_ib_dev *dev =
  3432. container_of(device, struct mlx5_ib_dev, ib_dev.dev);
  3433. return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
  3434. }
  3435. static ssize_t show_rev(struct device *device, struct device_attribute *attr,
  3436. char *buf)
  3437. {
  3438. struct mlx5_ib_dev *dev =
  3439. container_of(device, struct mlx5_ib_dev, ib_dev.dev);
  3440. return sprintf(buf, "%x\n", dev->mdev->rev_id);
  3441. }
  3442. static ssize_t show_board(struct device *device, struct device_attribute *attr,
  3443. char *buf)
  3444. {
  3445. struct mlx5_ib_dev *dev =
  3446. container_of(device, struct mlx5_ib_dev, ib_dev.dev);
  3447. return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
  3448. dev->mdev->board_id);
  3449. }
  3450. static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
  3451. static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
  3452. static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
  3453. static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
  3454. static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
  3455. static struct device_attribute *mlx5_class_attributes[] = {
  3456. &dev_attr_hw_rev,
  3457. &dev_attr_hca_type,
  3458. &dev_attr_board_id,
  3459. &dev_attr_fw_pages,
  3460. &dev_attr_reg_pages,
  3461. };
  3462. static void pkey_change_handler(struct work_struct *work)
  3463. {
  3464. struct mlx5_ib_port_resources *ports =
  3465. container_of(work, struct mlx5_ib_port_resources,
  3466. pkey_change_work);
  3467. mutex_lock(&ports->devr->mutex);
  3468. mlx5_ib_gsi_pkey_change(ports->gsi);
  3469. mutex_unlock(&ports->devr->mutex);
  3470. }
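/*
 * On a fatal device event, walk every QP on this ibdev and run the
 * completion handler of any CQ that still has outstanding work, so
 * consumers get to reap their in-flight requests.
 */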
  3471. static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
  3472. {
  3473. struct mlx5_ib_qp *mqp;
  3474. struct mlx5_ib_cq *send_mcq, *recv_mcq;
  3475. struct mlx5_core_cq *mcq;
  3476. struct list_head cq_armed_list;
  3477. unsigned long flags_qp;
  3478. unsigned long flags_cq;
  3479. unsigned long flags;
  3480. INIT_LIST_HEAD(&cq_armed_list);
/* Go over the QP list residing on this ibdev, synced with QP create/destroy. */
  3482. spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
  3483. list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
  3484. spin_lock_irqsave(&mqp->sq.lock, flags_qp);
  3485. if (mqp->sq.tail != mqp->sq.head) {
  3486. send_mcq = to_mcq(mqp->ibqp.send_cq);
  3487. spin_lock_irqsave(&send_mcq->lock, flags_cq);
  3488. if (send_mcq->mcq.comp &&
  3489. mqp->ibqp.send_cq->comp_handler) {
  3490. if (!send_mcq->mcq.reset_notify_added) {
  3491. send_mcq->mcq.reset_notify_added = 1;
  3492. list_add_tail(&send_mcq->mcq.reset_notify,
  3493. &cq_armed_list);
  3494. }
  3495. }
  3496. spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
  3497. }
  3498. spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
  3499. spin_lock_irqsave(&mqp->rq.lock, flags_qp);
  3500. /* no handling is needed for SRQ */
  3501. if (!mqp->ibqp.srq) {
  3502. if (mqp->rq.tail != mqp->rq.head) {
  3503. recv_mcq = to_mcq(mqp->ibqp.recv_cq);
  3504. spin_lock_irqsave(&recv_mcq->lock, flags_cq);
  3505. if (recv_mcq->mcq.comp &&
  3506. mqp->ibqp.recv_cq->comp_handler) {
  3507. if (!recv_mcq->mcq.reset_notify_added) {
  3508. recv_mcq->mcq.reset_notify_added = 1;
  3509. list_add_tail(&recv_mcq->mcq.reset_notify,
  3510. &cq_armed_list);
  3511. }
  3512. }
  3513. spin_unlock_irqrestore(&recv_mcq->lock,
  3514. flags_cq);
  3515. }
  3516. }
  3517. spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
  3518. }
/* At this point every in-flight post_send has been fenced by the
 * lock/unlock pairs above; now arm all involved CQs.
 */
  3522. list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
  3523. mcq->comp(mcq);
  3524. }
  3525. spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
  3526. }
  3527. static void delay_drop_handler(struct work_struct *work)
  3528. {
  3529. int err;
  3530. struct mlx5_ib_delay_drop *delay_drop =
  3531. container_of(work, struct mlx5_ib_delay_drop,
  3532. delay_drop_work);
  3533. atomic_inc(&delay_drop->events_cnt);
  3534. mutex_lock(&delay_drop->lock);
  3535. err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
  3536. delay_drop->timeout);
  3537. if (err) {
  3538. mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
  3539. delay_drop->timeout);
  3540. delay_drop->activate = false;
  3541. }
  3542. mutex_unlock(&delay_drop->lock);
  3543. }
  3544. static void mlx5_ib_handle_event(struct work_struct *_work)
  3545. {
  3546. struct mlx5_ib_event_work *work =
  3547. container_of(_work, struct mlx5_ib_event_work, work);
  3548. struct mlx5_ib_dev *ibdev;
  3549. struct ib_event ibev;
  3550. bool fatal = false;
  3551. u8 port = (u8)work->param;
  3552. if (mlx5_core_is_mp_slave(work->dev)) {
  3553. ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
  3554. if (!ibdev)
  3555. goto out;
  3556. } else {
  3557. ibdev = work->context;
  3558. }
  3559. switch (work->event) {
  3560. case MLX5_DEV_EVENT_SYS_ERROR:
  3561. ibev.event = IB_EVENT_DEVICE_FATAL;
  3562. mlx5_ib_handle_internal_error(ibdev);
  3563. fatal = true;
  3564. break;
  3565. case MLX5_DEV_EVENT_PORT_UP:
  3566. case MLX5_DEV_EVENT_PORT_DOWN:
  3567. case MLX5_DEV_EVENT_PORT_INITIALIZED:
  3568. /* In RoCE, port up/down events are handled in
  3569. * mlx5_netdev_event().
  3570. */
  3571. if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
  3572. IB_LINK_LAYER_ETHERNET)
  3573. goto out;
  3574. ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ?
  3575. IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
  3576. break;
  3577. case MLX5_DEV_EVENT_LID_CHANGE:
  3578. ibev.event = IB_EVENT_LID_CHANGE;
  3579. break;
  3580. case MLX5_DEV_EVENT_PKEY_CHANGE:
  3581. ibev.event = IB_EVENT_PKEY_CHANGE;
  3582. schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
  3583. break;
  3584. case MLX5_DEV_EVENT_GUID_CHANGE:
  3585. ibev.event = IB_EVENT_GID_CHANGE;
  3586. break;
  3587. case MLX5_DEV_EVENT_CLIENT_REREG:
  3588. ibev.event = IB_EVENT_CLIENT_REREGISTER;
  3589. break;
  3590. case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
  3591. schedule_work(&ibdev->delay_drop.delay_drop_work);
  3592. goto out;
  3593. default:
  3594. goto out;
  3595. }
  3596. ibev.device = &ibdev->ib_dev;
  3597. ibev.element.port_num = port;
  3598. if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
  3599. mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
  3600. goto out;
  3601. }
  3602. if (ibdev->ib_active)
  3603. ib_dispatch_event(&ibev);
  3604. if (fatal)
  3605. ibdev->ib_active = false;
  3606. out:
  3607. kfree(work);
  3608. }
  3609. static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
  3610. enum mlx5_dev_event event, unsigned long param)
  3611. {
  3612. struct mlx5_ib_event_work *work;
  3613. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  3614. if (!work)
  3615. return;
  3616. INIT_WORK(&work->work, mlx5_ib_handle_event);
  3617. work->dev = dev;
  3618. work->param = param;
  3619. work->context = context;
  3620. work->event = event;
  3621. queue_work(mlx5_ib_event_wq, &work->work);
  3622. }
  3623. static int set_has_smi_cap(struct mlx5_ib_dev *dev)
  3624. {
  3625. struct mlx5_hca_vport_context vport_ctx;
  3626. int err;
  3627. int port;
  3628. for (port = 1; port <= dev->num_ports; port++) {
  3629. dev->mdev->port_caps[port - 1].has_smi = false;
  3630. if (MLX5_CAP_GEN(dev->mdev, port_type) ==
  3631. MLX5_CAP_PORT_TYPE_IB) {
  3632. if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
  3633. err = mlx5_query_hca_vport_context(dev->mdev, 0,
  3634. port, 0,
  3635. &vport_ctx);
  3636. if (err) {
  3637. mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
  3638. port, err);
  3639. return err;
  3640. }
  3641. dev->mdev->port_caps[port - 1].has_smi =
  3642. vport_ctx.has_smi;
  3643. } else {
  3644. dev->mdev->port_caps[port - 1].has_smi = true;
  3645. }
  3646. }
  3647. }
  3648. return 0;
  3649. }
  3650. static void get_ext_port_caps(struct mlx5_ib_dev *dev)
  3651. {
  3652. int port;
  3653. for (port = 1; port <= dev->num_ports; port++)
  3654. mlx5_query_ext_port_caps(dev, port);
  3655. }
  3656. static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
  3657. {
  3658. struct ib_device_attr *dprops = NULL;
  3659. struct ib_port_attr *pprops = NULL;
  3660. int err = -ENOMEM;
  3661. struct ib_udata uhw = {.inlen = 0, .outlen = 0};
  3662. pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
  3663. if (!pprops)
  3664. goto out;
  3665. dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
  3666. if (!dprops)
  3667. goto out;
  3668. err = set_has_smi_cap(dev);
  3669. if (err)
  3670. goto out;
  3671. err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
  3672. if (err) {
  3673. mlx5_ib_warn(dev, "query_device failed %d\n", err);
  3674. goto out;
  3675. }
  3676. memset(pprops, 0, sizeof(*pprops));
  3677. err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
  3678. if (err) {
  3679. mlx5_ib_warn(dev, "query_port %d failed %d\n",
  3680. port, err);
  3681. goto out;
  3682. }
  3683. dev->mdev->port_caps[port - 1].pkey_table_len =
  3684. dprops->max_pkeys;
  3685. dev->mdev->port_caps[port - 1].gid_table_len =
  3686. pprops->gid_tbl_len;
  3687. mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
  3688. port, dprops->max_pkeys, pprops->gid_tbl_len);
  3689. out:
  3690. kfree(pprops);
  3691. kfree(dprops);
  3692. return err;
  3693. }
  3694. static void destroy_umrc_res(struct mlx5_ib_dev *dev)
  3695. {
  3696. int err;
  3697. err = mlx5_mr_cache_cleanup(dev);
  3698. if (err)
  3699. mlx5_ib_warn(dev, "mr cache cleanup failed\n");
  3700. if (dev->umrc.qp)
  3701. mlx5_ib_destroy_qp(dev->umrc.qp);
  3702. if (dev->umrc.cq)
  3703. ib_free_cq(dev->umrc.cq);
  3704. if (dev->umrc.pd)
  3705. ib_dealloc_pd(dev->umrc.pd);
  3706. }
  3707. enum {
  3708. MAX_UMR_WR = 128,
  3709. };
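/*
 * Set up the kernel-owned UMR resources: a PD, a softirq-polled CQ and a
 * MLX5_IB_QPT_REG_UMR QP driven through INIT/RTR/RTS, followed by MR cache
 * initialization.
 */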
  3710. static int create_umr_res(struct mlx5_ib_dev *dev)
  3711. {
  3712. struct ib_qp_init_attr *init_attr = NULL;
  3713. struct ib_qp_attr *attr = NULL;
  3714. struct ib_pd *pd;
  3715. struct ib_cq *cq;
  3716. struct ib_qp *qp;
  3717. int ret;
  3718. attr = kzalloc(sizeof(*attr), GFP_KERNEL);
  3719. init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
  3720. if (!attr || !init_attr) {
  3721. ret = -ENOMEM;
  3722. goto error_0;
  3723. }
  3724. pd = ib_alloc_pd(&dev->ib_dev, 0);
  3725. if (IS_ERR(pd)) {
  3726. mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
  3727. ret = PTR_ERR(pd);
  3728. goto error_0;
  3729. }
  3730. cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
  3731. if (IS_ERR(cq)) {
  3732. mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
  3733. ret = PTR_ERR(cq);
  3734. goto error_2;
  3735. }
  3736. init_attr->send_cq = cq;
  3737. init_attr->recv_cq = cq;
  3738. init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
  3739. init_attr->cap.max_send_wr = MAX_UMR_WR;
  3740. init_attr->cap.max_send_sge = 1;
  3741. init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
  3742. init_attr->port_num = 1;
  3743. qp = mlx5_ib_create_qp(pd, init_attr, NULL);
  3744. if (IS_ERR(qp)) {
  3745. mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
  3746. ret = PTR_ERR(qp);
  3747. goto error_3;
  3748. }
  3749. qp->device = &dev->ib_dev;
  3750. qp->real_qp = qp;
  3751. qp->uobject = NULL;
  3752. qp->qp_type = MLX5_IB_QPT_REG_UMR;
  3753. qp->send_cq = init_attr->send_cq;
  3754. qp->recv_cq = init_attr->recv_cq;
  3755. attr->qp_state = IB_QPS_INIT;
  3756. attr->port_num = 1;
  3757. ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
  3758. IB_QP_PORT, NULL);
  3759. if (ret) {
  3760. mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
  3761. goto error_4;
  3762. }
  3763. memset(attr, 0, sizeof(*attr));
  3764. attr->qp_state = IB_QPS_RTR;
  3765. attr->path_mtu = IB_MTU_256;
  3766. ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
  3767. if (ret) {
  3768. mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
  3769. goto error_4;
  3770. }
  3771. memset(attr, 0, sizeof(*attr));
  3772. attr->qp_state = IB_QPS_RTS;
  3773. ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
  3774. if (ret) {
  3775. mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
  3776. goto error_4;
  3777. }
  3778. dev->umrc.qp = qp;
  3779. dev->umrc.cq = cq;
  3780. dev->umrc.pd = pd;
  3781. sema_init(&dev->umrc.sem, MAX_UMR_WR);
  3782. ret = mlx5_mr_cache_init(dev);
  3783. if (ret) {
  3784. mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
  3785. goto error_4;
  3786. }
  3787. kfree(attr);
  3788. kfree(init_attr);
  3789. return 0;
  3790. error_4:
  3791. mlx5_ib_destroy_qp(qp);
  3792. dev->umrc.qp = NULL;
  3793. error_3:
  3794. ib_free_cq(cq);
  3795. dev->umrc.cq = NULL;
  3796. error_2:
  3797. ib_dealloc_pd(pd);
  3798. dev->umrc.pd = NULL;
  3799. error_0:
  3800. kfree(attr);
  3801. kfree(init_attr);
  3802. return ret;
  3803. }
static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
{
	switch (umr_fence_cap) {
	case MLX5_CAP_UMR_FENCE_NONE:
		return MLX5_FENCE_MODE_NONE;
	case MLX5_CAP_UMR_FENCE_SMALL:
		return MLX5_FENCE_MODE_INITIATOR_SMALL;
	default:
		return MLX5_FENCE_MODE_STRONG_ORDERING;
	}
}
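
/*
 * Allocate the device-wide verbs resources (a PD, a CQ, two XRC domains and
 * two SRQs) that the driver uses internally, e.g. as default objects when
 * creating XRC QPs.  The objects are created through the driver's own verbs
 * entry points, so fields normally filled in by the IB core are set by hand.
 */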
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	int port;
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	mutex_init(&devr->mutex);

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device = &dev->ib_dev;
	devr->c0->uobject = NULL;
	devr->c0->comp_handler = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device = &dev->ib_dev;
	devr->s0->pd = devr->p0;
	devr->s0->uobject = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context = NULL;
	devr->s0->srq_type = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd = devr->x0;
	devr->s0->ext.cq = devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error5;
	}
	devr->s1->device = &dev->ib_dev;
	devr->s1->pd = devr->p0;
	devr->s1->uobject = NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context = NULL;
	devr->s1->srq_type = IB_SRQT_BASIC;
	devr->s1->ext.cq = devr->c0;
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s1->usecnt, 0);

	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
		INIT_WORK(&devr->ports[port].pkey_change_work,
			  pkey_change_handler);
		devr->ports[port].devr = devr;
	}

	return 0;

error5:
	mlx5_ib_destroy_srq(devr->s0);
error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	struct mlx5_ib_dev *dev =
		container_of(devr, struct mlx5_ib_dev, devr);
	int port;

	mlx5_ib_destroy_srq(devr->s1);
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);

	/* Make sure no change P_Key work items are still executing */
	for (port = 0; port < dev->num_ports; ++port)
		cancel_work_sync(&devr->ports[port].pkey_change_work);
}
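
/*
 * Compute the rdma core capability flags for a port from its link layer,
 * the RoCE L3/version capabilities and whether raw packet QPs are usable
 * (they are not when the device is in multi-port mode).
 */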
static u32 get_core_cap_flags(struct ib_device *ibdev,
			      struct mlx5_hca_vport_context *rep)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
	bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
	u32 ret = 0;

	if (rep->grh_required)
		ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;

	if (ll == IB_LINK_LAYER_INFINIBAND)
		return ret | RDMA_CORE_PORT_IBA_IB;

	if (raw_support)
		ret |= RDMA_CORE_PORT_RAW_PACKET;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
		return ret;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
		return ret;

	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE;

	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return ret;
}
static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
	struct mlx5_hca_vport_context rep = {0};
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	if (ll == IB_LINK_LAYER_INFINIBAND) {
		err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
						   &rep);
		if (err)
			return err;
	}

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
		immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;

	return 0;
}

static void get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct mlx5_ib_dev *dev =
		container_of(ibdev, struct mlx5_ib_dev, ib_dev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
		 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
		 fw_rev_sub(dev->mdev));
}
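
/*
 * RoCE LAG support: when the two Ethernet ports are bonded, create the
 * vport LAG and a LAG demux flow table so that received traffic is steered
 * back to the right vport.
 */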
static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
								 MLX5_FLOW_NAMESPACE_LAG);
	struct mlx5_flow_table *ft;
	int err;

	if (!ns || !mlx5_lag_is_active(mdev))
		return 0;

	err = mlx5_cmd_create_vport_lag(mdev);
	if (err)
		return err;

	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_destroy_vport_lag;
	}

	dev->flow_db->lag_demux_ft = ft;
	return 0;

err_destroy_vport_lag:
	mlx5_cmd_destroy_vport_lag(mdev);
	return err;
}

static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	if (dev->flow_db->lag_demux_ft) {
		mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
		dev->flow_db->lag_demux_ft = NULL;

		mlx5_cmd_destroy_vport_lag(mdev);
	}
}

static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
	int err;

	dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
	err = register_netdevice_notifier(&dev->roce[port_num].nb);
	if (err) {
		dev->roce[port_num].nb.notifier_call = NULL;
		return err;
	}

	return 0;
}

static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
	if (dev->roce[port_num].nb.notifier_call) {
		unregister_netdevice_notifier(&dev->roce[port_num].nb);
		dev->roce[port_num].nb.notifier_call = NULL;
	}
}

static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
	int err;

	if (MLX5_CAP_GEN(dev->mdev, roce)) {
		err = mlx5_nic_vport_enable_roce(dev->mdev);
		if (err)
			return err;
	}

	err = mlx5_eth_lag_init(dev);
	if (err)
		goto err_disable_roce;

	return 0;

err_disable_roce:
	if (MLX5_CAP_GEN(dev->mdev, roce))
		mlx5_nic_vport_disable_roce(dev->mdev);

	return err;
}

static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
	mlx5_eth_lag_cleanup(dev);
	if (MLX5_CAP_GEN(dev->mdev, roce))
		mlx5_nic_vport_disable_roce(dev->mdev);
}
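
/*
 * HW counter descriptors: each entry maps a counter name exposed through
 * the rdma hw_stats interface to its byte offset inside the corresponding
 * firmware query output (Q counters, congestion statistics or PPCNT).
 * INIT_Q_COUNTER(out_of_buffer), for example, expands to
 * { .name = "out_of_buffer",
 *   .offset = MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer) }.
 */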
struct mlx5_ib_counter {
	const char *name;
	size_t offset;
};

#define INIT_Q_COUNTER(_name)		\
	{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}

static const struct mlx5_ib_counter basic_q_cnts[] = {
	INIT_Q_COUNTER(rx_write_requests),
	INIT_Q_COUNTER(rx_read_requests),
	INIT_Q_COUNTER(rx_atomic_requests),
	INIT_Q_COUNTER(out_of_buffer),
};

static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
	INIT_Q_COUNTER(out_of_sequence),
};

static const struct mlx5_ib_counter retrans_q_cnts[] = {
	INIT_Q_COUNTER(duplicate_request),
	INIT_Q_COUNTER(rnr_nak_retry_err),
	INIT_Q_COUNTER(packet_seq_err),
	INIT_Q_COUNTER(implied_nak_seq_err),
	INIT_Q_COUNTER(local_ack_timeout_err),
};

#define INIT_CONG_COUNTER(_name)		\
	{ .name = #_name, .offset =	\
		MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}

static const struct mlx5_ib_counter cong_cnts[] = {
	INIT_CONG_COUNTER(rp_cnp_ignored),
	INIT_CONG_COUNTER(rp_cnp_handled),
	INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
	INIT_CONG_COUNTER(np_cnp_sent),
};

static const struct mlx5_ib_counter extended_err_cnts[] = {
	INIT_Q_COUNTER(resp_local_length_error),
	INIT_Q_COUNTER(resp_cqe_error),
	INIT_Q_COUNTER(req_cqe_error),
	INIT_Q_COUNTER(req_remote_invalid_request),
	INIT_Q_COUNTER(req_remote_access_errors),
	INIT_Q_COUNTER(resp_remote_access_errors),
	INIT_Q_COUNTER(resp_cqe_flush_error),
	INIT_Q_COUNTER(req_cqe_flush_error),
};

#define INIT_EXT_PPCNT_COUNTER(_name)		\
	{ .name = #_name, .offset =	\
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}

static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
	INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
};
  4130. static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
  4131. {
  4132. int i;
  4133. for (i = 0; i < dev->num_ports; i++) {
  4134. if (dev->port[i].cnts.set_id_valid)
  4135. mlx5_core_dealloc_q_counter(dev->mdev,
  4136. dev->port[i].cnts.set_id);
  4137. kfree(dev->port[i].cnts.names);
  4138. kfree(dev->port[i].cnts.offsets);
  4139. }
  4140. }
  4141. static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
  4142. struct mlx5_ib_counters *cnts)
  4143. {
  4144. u32 num_counters;
  4145. num_counters = ARRAY_SIZE(basic_q_cnts);
  4146. if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
  4147. num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
  4148. if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
  4149. num_counters += ARRAY_SIZE(retrans_q_cnts);
  4150. if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
  4151. num_counters += ARRAY_SIZE(extended_err_cnts);
  4152. cnts->num_q_counters = num_counters;
  4153. if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
  4154. cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
  4155. num_counters += ARRAY_SIZE(cong_cnts);
  4156. }
  4157. if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
  4158. cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
  4159. num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
  4160. }
  4161. cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
  4162. if (!cnts->names)
  4163. return -ENOMEM;
  4164. cnts->offsets = kcalloc(num_counters,
  4165. sizeof(cnts->offsets), GFP_KERNEL);
  4166. if (!cnts->offsets)
  4167. goto err_names;
  4168. return 0;
  4169. err_names:
  4170. kfree(cnts->names);
  4171. cnts->names = NULL;
  4172. return -ENOMEM;
  4173. }
  4174. static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
  4175. const char **names,
  4176. size_t *offsets)
  4177. {
  4178. int i;
  4179. int j = 0;
  4180. for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
  4181. names[j] = basic_q_cnts[i].name;
  4182. offsets[j] = basic_q_cnts[i].offset;
  4183. }
  4184. if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
  4185. for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
  4186. names[j] = out_of_seq_q_cnts[i].name;
  4187. offsets[j] = out_of_seq_q_cnts[i].offset;
  4188. }
  4189. }
  4190. if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
  4191. for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
  4192. names[j] = retrans_q_cnts[i].name;
  4193. offsets[j] = retrans_q_cnts[i].offset;
  4194. }
  4195. }
  4196. if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
  4197. for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
  4198. names[j] = extended_err_cnts[i].name;
  4199. offsets[j] = extended_err_cnts[i].offset;
  4200. }
  4201. }
  4202. if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
  4203. for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
  4204. names[j] = cong_cnts[i].name;
  4205. offsets[j] = cong_cnts[i].offset;
  4206. }
  4207. }
  4208. if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
  4209. for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
  4210. names[j] = ext_ppcnt_cnts[i].name;
  4211. offsets[j] = ext_ppcnt_cnts[i].offset;
  4212. }
  4213. }
  4214. }
  4215. static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
  4216. {
  4217. int err = 0;
  4218. int i;
  4219. for (i = 0; i < dev->num_ports; i++) {
  4220. err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
  4221. if (err)
  4222. goto err_alloc;
  4223. mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
  4224. dev->port[i].cnts.offsets);
  4225. err = mlx5_core_alloc_q_counter(dev->mdev,
  4226. &dev->port[i].cnts.set_id);
  4227. if (err) {
  4228. mlx5_ib_warn(dev,
  4229. "couldn't allocate queue counter for port %d, err %d\n",
  4230. i + 1, err);
  4231. goto err_alloc;
  4232. }
  4233. dev->port[i].cnts.set_id_valid = true;
  4234. }
  4235. return 0;
  4236. err_alloc:
  4237. mlx5_ib_dealloc_counters(dev);
  4238. return err;
  4239. }
  4240. static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
  4241. u8 port_num)
  4242. {
  4243. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  4244. struct mlx5_ib_port *port = &dev->port[port_num - 1];
  4245. /* We support only per port stats */
  4246. if (port_num == 0)
  4247. return NULL;
  4248. return rdma_alloc_hw_stats_struct(port->cnts.names,
  4249. port->cnts.num_q_counters +
  4250. port->cnts.num_cong_counters +
  4251. port->cnts.num_ext_ppcnt_counters,
  4252. RDMA_HW_STATS_DEFAULT_LIFESPAN);
  4253. }
  4254. static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
  4255. struct mlx5_ib_port *port,
  4256. struct rdma_hw_stats *stats)
  4257. {
  4258. int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
  4259. void *out;
  4260. __be32 val;
  4261. int ret, i;
  4262. out = kvzalloc(outlen, GFP_KERNEL);
  4263. if (!out)
  4264. return -ENOMEM;
  4265. ret = mlx5_core_query_q_counter(mdev,
  4266. port->cnts.set_id, 0,
  4267. out, outlen);
  4268. if (ret)
  4269. goto free;
  4270. for (i = 0; i < port->cnts.num_q_counters; i++) {
  4271. val = *(__be32 *)(out + port->cnts.offsets[i]);
  4272. stats->value[i] = (u64)be32_to_cpu(val);
  4273. }
  4274. free:
  4275. kvfree(out);
  4276. return ret;
  4277. }
  4278. static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
  4279. struct mlx5_ib_port *port,
  4280. struct rdma_hw_stats *stats)
  4281. {
  4282. int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters;
  4283. int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
  4284. int ret, i;
  4285. void *out;
  4286. out = kvzalloc(sz, GFP_KERNEL);
  4287. if (!out)
  4288. return -ENOMEM;
  4289. ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
  4290. if (ret)
  4291. goto free;
  4292. for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) {
  4293. stats->value[i + offset] =
  4294. be64_to_cpup((__be64 *)(out +
  4295. port->cnts.offsets[i + offset]));
  4296. }
  4297. free:
  4298. kvfree(out);
  4299. return ret;
  4300. }
  4301. static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
  4302. struct rdma_hw_stats *stats,
  4303. u8 port_num, int index)
  4304. {
  4305. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  4306. struct mlx5_ib_port *port = &dev->port[port_num - 1];
  4307. struct mlx5_core_dev *mdev;
  4308. int ret, num_counters;
  4309. u8 mdev_port_num;
  4310. if (!stats)
  4311. return -EINVAL;
  4312. num_counters = port->cnts.num_q_counters +
  4313. port->cnts.num_cong_counters +
  4314. port->cnts.num_ext_ppcnt_counters;
  4315. /* q_counters are per IB device, query the master mdev */
  4316. ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
  4317. if (ret)
  4318. return ret;
  4319. if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
  4320. ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats);
  4321. if (ret)
  4322. return ret;
  4323. }
  4324. if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
  4325. mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
  4326. &mdev_port_num);
  4327. if (!mdev) {
  4328. /* If port is not affiliated yet, its in down state
  4329. * which doesn't have any counters yet, so it would be
  4330. * zero. So no need to read from the HCA.
  4331. */
  4332. goto done;
  4333. }
  4334. ret = mlx5_lag_query_cong_counters(dev->mdev,
  4335. stats->value +
  4336. port->cnts.num_q_counters,
  4337. port->cnts.num_cong_counters,
  4338. port->cnts.offsets +
  4339. port->cnts.num_q_counters);
  4340. mlx5_ib_put_native_port_mdev(dev, port_num);
  4341. if (ret)
  4342. return ret;
  4343. }
  4344. done:
  4345. return num_counters;
  4346. }
  4347. static struct net_device*
  4348. mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
  4349. u8 port_num,
  4350. enum rdma_netdev_t type,
  4351. const char *name,
  4352. unsigned char name_assign_type,
  4353. void (*setup)(struct net_device *))
  4354. {
  4355. struct net_device *netdev;
  4356. if (type != RDMA_NETDEV_IPOIB)
  4357. return ERR_PTR(-EOPNOTSUPP);
  4358. netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
  4359. name, setup);
  4360. return netdev;
  4361. }
  4362. static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
  4363. {
  4364. if (!dev->delay_drop.dbg)
  4365. return;
  4366. debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
  4367. kfree(dev->delay_drop.dbg);
  4368. dev->delay_drop.dbg = NULL;
  4369. }
  4370. static void cancel_delay_drop(struct mlx5_ib_dev *dev)
  4371. {
  4372. if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
  4373. return;
  4374. cancel_work_sync(&dev->delay_drop.delay_drop_work);
  4375. delay_drop_debugfs_cleanup(dev);
  4376. }
  4377. static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
  4378. size_t count, loff_t *pos)
  4379. {
  4380. struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
  4381. char lbuf[20];
  4382. int len;
  4383. len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
  4384. return simple_read_from_buffer(buf, count, pos, lbuf, len);
  4385. }
  4386. static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
  4387. size_t count, loff_t *pos)
  4388. {
  4389. struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
  4390. u32 timeout;
  4391. u32 var;
  4392. if (kstrtouint_from_user(buf, count, 0, &var))
  4393. return -EFAULT;
  4394. timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
  4395. 1000);
  4396. if (timeout != var)
  4397. mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
  4398. timeout);
  4399. delay_drop->timeout = timeout;
  4400. return count;
  4401. }
  4402. static const struct file_operations fops_delay_drop_timeout = {
  4403. .owner = THIS_MODULE,
  4404. .open = simple_open,
  4405. .write = delay_drop_timeout_write,
  4406. .read = delay_drop_timeout_read,
  4407. };
  4408. static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
  4409. {
  4410. struct mlx5_ib_dbg_delay_drop *dbg;
  4411. if (!mlx5_debugfs_root)
  4412. return 0;
  4413. dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
  4414. if (!dbg)
  4415. return -ENOMEM;
  4416. dev->delay_drop.dbg = dbg;
  4417. dbg->dir_debugfs =
  4418. debugfs_create_dir("delay_drop",
  4419. dev->mdev->priv.dbg_root);
  4420. if (!dbg->dir_debugfs)
  4421. goto out_debugfs;
  4422. dbg->events_cnt_debugfs =
  4423. debugfs_create_atomic_t("num_timeout_events", 0400,
  4424. dbg->dir_debugfs,
  4425. &dev->delay_drop.events_cnt);
  4426. if (!dbg->events_cnt_debugfs)
  4427. goto out_debugfs;
  4428. dbg->rqs_cnt_debugfs =
  4429. debugfs_create_atomic_t("num_rqs", 0400,
  4430. dbg->dir_debugfs,
  4431. &dev->delay_drop.rqs_cnt);
  4432. if (!dbg->rqs_cnt_debugfs)
  4433. goto out_debugfs;
  4434. dbg->timeout_debugfs =
  4435. debugfs_create_file("timeout", 0600,
  4436. dbg->dir_debugfs,
  4437. &dev->delay_drop,
  4438. &fops_delay_drop_timeout);
  4439. if (!dbg->timeout_debugfs)
  4440. goto out_debugfs;
  4441. return 0;
  4442. out_debugfs:
  4443. delay_drop_debugfs_cleanup(dev);
  4444. return -ENOMEM;
  4445. }
  4446. static void init_delay_drop(struct mlx5_ib_dev *dev)
  4447. {
  4448. if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
  4449. return;
  4450. mutex_init(&dev->delay_drop.lock);
  4451. dev->delay_drop.dev = dev;
  4452. dev->delay_drop.activate = false;
  4453. dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
  4454. INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
  4455. atomic_set(&dev->delay_drop.rqs_cnt, 0);
  4456. atomic_set(&dev->delay_drop.events_cnt, 0);
  4457. if (delay_drop_debugfs_init(dev))
  4458. mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
  4459. }
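
/*
 * Report the CPU affinity hint of the EQ backing a completion vector, so
 * that ULPs can place their queues on nearby cores.
 */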
static const struct cpumask *
mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
}
/* The mlx5_ib_multiport_mutex should be held when calling this function */
static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
				      struct mlx5_ib_multiport_info *mpi)
{
	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
	struct mlx5_ib_port *port = &ibdev->port[port_num];
	int comps;
	int err;
	int i;

	mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);

	spin_lock(&port->mp.mpi_lock);
	if (!mpi->ibdev) {
		spin_unlock(&port->mp.mpi_lock);
		return;
	}
	mpi->ibdev = NULL;

	spin_unlock(&port->mp.mpi_lock);
	mlx5_remove_netdev_notifier(ibdev, port_num);
	spin_lock(&port->mp.mpi_lock);

	comps = mpi->mdev_refcnt;
	if (comps) {
		mpi->unaffiliate = true;
		init_completion(&mpi->unref_comp);
		spin_unlock(&port->mp.mpi_lock);

		for (i = 0; i < comps; i++)
			wait_for_completion(&mpi->unref_comp);

		spin_lock(&port->mp.mpi_lock);
		mpi->unaffiliate = false;
	}

	port->mp.mpi = NULL;

	list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);

	spin_unlock(&port->mp.mpi_lock);

	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);

	mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
	/* Log an error, still needed to cleanup the pointers and add
	 * it back to the list.
	 */
	if (err)
		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
			    port_num + 1);

	ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
}

/* The mlx5_ib_multiport_mutex should be held when calling this function */
static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
				    struct mlx5_ib_multiport_info *mpi)
{
	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
	int err;

	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
	if (ibdev->port[port_num].mp.mpi) {
		mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
			    port_num + 1);
		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
		return false;
	}

	ibdev->port[port_num].mp.mpi = mpi;
	mpi->ibdev = ibdev;
	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);

	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
	if (err)
		goto unbind;

	err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
	if (err)
		goto unbind;

	err = mlx5_add_netdev_notifier(ibdev, port_num);
	if (err) {
		mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
			    port_num + 1);
		goto unbind;
	}

	err = mlx5_ib_init_cong_debugfs(ibdev, port_num);
	if (err)
		goto unbind;

	return true;

unbind:
	mlx5_ib_unbind_slave_port(ibdev, mpi);
	return false;
}
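
/*
 * On a dual-port HCA the master device owns the ib_device; secondary ports
 * are affiliated here by matching the system image GUID of unaffiliated
 * slave devices against ours.
 */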
static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
{
	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
	struct mlx5_ib_multiport_info *mpi;
	int err;
	int i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return 0;

	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
						     &dev->sys_image_guid);
	if (err)
		return err;

	err = mlx5_nic_vport_enable_roce(dev->mdev);
	if (err)
		return err;

	mutex_lock(&mlx5_ib_multiport_mutex);
	for (i = 0; i < dev->num_ports; i++) {
		bool bound = false;

		/* build a stub multiport info struct for the native port. */
		if (i == port_num) {
			mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
			if (!mpi) {
				mutex_unlock(&mlx5_ib_multiport_mutex);
				mlx5_nic_vport_disable_roce(dev->mdev);
				return -ENOMEM;
			}

			mpi->is_master = true;
			mpi->mdev = dev->mdev;
			mpi->sys_image_guid = dev->sys_image_guid;
			dev->port[i].mp.mpi = mpi;
			mpi->ibdev = dev;
			mpi = NULL;
			continue;
		}

		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
				    list) {
			if (dev->sys_image_guid == mpi->sys_image_guid &&
			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
				bound = mlx5_ib_bind_slave_port(dev, mpi);
			}

			if (bound) {
				dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
				list_del(&mpi->list);
				break;
			}
		}
		if (!bound) {
			get_port_caps(dev, i + 1);
			mlx5_ib_dbg(dev, "no free port found for port %d\n",
				    i + 1);
		}
	}

	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return err;
}

static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
{
	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
	int i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	mutex_lock(&mlx5_ib_multiport_mutex);
	for (i = 0; i < dev->num_ports; i++) {
		if (dev->port[i].mp.mpi) {
			/* Destroy the native port stub */
			if (i == port_num) {
				kfree(dev->port[i].mp.mpi);
				dev->port[i].mp.mpi = NULL;
			} else {
				mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
				mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
			}
		}
	}

	mlx5_ib_dbg(dev, "removing from devlist\n");
	list_del(&dev->ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);

	mlx5_nic_vport_disable_roce(dev->mdev);
}
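
/*
 * Driver-specific uverbs attribute trees.  These extend the default uverbs
 * object tree with mlx5-only methods (device memory allocation and flow
 * action flags); populate_specs_root() stitches the applicable trees
 * together based on device capabilities.
 */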
ADD_UVERBS_ATTRIBUTES_SIMPLE(
	mlx5_ib_dm,
	UVERBS_OBJECT_DM,
	UVERBS_METHOD_DM_ALLOC,
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			    UVERBS_ATTR_TYPE(u64),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
			    UVERBS_ATTR_TYPE(u16),
			    UA_MANDATORY));

ADD_UVERBS_ATTRIBUTES_SIMPLE(
	mlx5_ib_flow_action,
	UVERBS_OBJECT_FLOW_ACTION,
	UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
			     enum mlx5_ib_uapi_flow_action_flags));

#define NUM_TREES 5
static int populate_specs_root(struct mlx5_ib_dev *dev)
{
	const struct uverbs_object_tree_def *default_root[NUM_TREES + 1] = {
		uverbs_default_get_objects()};
	size_t num_trees = 1;

	if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
	    !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
		default_root[num_trees++] = &mlx5_ib_flow_action;

	if (MLX5_CAP_DEV_MEM(dev->mdev, memic) &&
	    !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
		default_root[num_trees++] = &mlx5_ib_dm;

	if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
	    MLX5_GENERAL_OBJ_TYPES_CAP_UCTX &&
	    !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
		default_root[num_trees++] = mlx5_ib_get_devx_tree();

	num_trees += mlx5_ib_get_flow_trees(default_root + num_trees);

	dev->ib_dev.driver_specs_root =
		uverbs_alloc_spec_tree(num_trees, default_root);

	return PTR_ERR_OR_ZERO(dev->ib_dev.driver_specs_root);
}

static void depopulate_specs_root(struct mlx5_ib_dev *dev)
{
	uverbs_free_spec_tree(dev->ib_dev.driver_specs_root);
}
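
/*
 * ib_counters support: a counters object is created empty, bound to flow
 * counters when a flow is attached to it, and mlx5_ib_read_counters() then
 * copies the hardware values into the user buffer using the
 * description/index pairs recorded at bind time.
 */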
static int mlx5_ib_read_counters(struct ib_counters *counters,
				 struct ib_counters_read_attr *read_attr,
				 struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
	struct mlx5_read_counters_attr mread_attr = {};
	struct mlx5_ib_flow_counters_desc *desc;
	int ret, i;

	mutex_lock(&mcounters->mcntrs_mutex);
	if (mcounters->cntrs_max_index > read_attr->ncounters) {
		ret = -EINVAL;
		goto err_bound;
	}

	mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
				 GFP_KERNEL);
	if (!mread_attr.out) {
		ret = -ENOMEM;
		goto err_bound;
	}

	mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
	mread_attr.flags = read_attr->flags;
	ret = mcounters->read_counters(counters->device, &mread_attr);
	if (ret)
		goto err_read;

	/* do the pass over the counters data array to assign according to the
	 * descriptions and indexing pairs
	 */
	desc = mcounters->counters_data;
	for (i = 0; i < mcounters->ncounters; i++)
		read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];

err_read:
	kfree(mread_attr.out);
err_bound:
	mutex_unlock(&mcounters->mcntrs_mutex);
	return ret;
}

static int mlx5_ib_destroy_counters(struct ib_counters *counters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);

	counters_clear_description(counters);
	if (mcounters->hw_cntrs_hndl)
		mlx5_fc_destroy(to_mdev(counters->device)->mdev,
				mcounters->hw_cntrs_hndl);

	kfree(mcounters);

	return 0;
}

static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
						   struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters;

	mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
	if (!mcounters)
		return ERR_PTR(-ENOMEM);

	mutex_init(&mcounters->mcntrs_mutex);

	return &mcounters->ibcntrs;
}
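
/*
 * The functions below implement the individual stages of the mlx5_ib init
 * profile: each stage provides an init callback and an optional cleanup
 * callback, and __mlx5_ib_add()/__mlx5_ib_remove() walk the profile table
 * to bring the device up or tear it back down to a given stage.
 */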
void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_cleanup_multiport_master(dev);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	cleanup_srcu_struct(&dev->mr_srcu);
#endif
	kfree(dev->port);
}
int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	const char *name;
	int err;
	int i;

	dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
			    GFP_KERNEL);
	if (!dev->port)
		return -ENOMEM;

	for (i = 0; i < dev->num_ports; i++) {
		spin_lock_init(&dev->port[i].mp.mpi_lock);
		rwlock_init(&dev->roce[i].netdev_lock);
	}

	err = mlx5_ib_init_multiport_master(dev);
	if (err)
		goto err_free_port;

	if (!mlx5_core_mp_enabled(mdev)) {
		for (i = 1; i <= dev->num_ports; i++) {
			err = get_port_caps(dev, i);
			if (err)
				break;
		}
	} else {
		err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
	}
	if (err)
		goto err_mp;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	if (!mlx5_lag_is_active(mdev))
		name = "mlx5_%d";
	else
		name = "mlx5_bond_%d";

	strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
	dev->ib_dev.phys_port_cnt = dev->num_ports;
	dev->ib_dev.num_comp_vectors =
		dev->mdev->priv.eq_table.num_comp_vectors;
	dev->ib_dev.dev.parent = &mdev->pdev->dev;

	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);

	spin_lock_init(&dev->memic.memic_lock);
	dev->memic.dev = mdev;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	err = init_srcu_struct(&dev->mr_srcu);
	if (err)
		goto err_mp;
#endif

	return 0;

err_mp:
	mlx5_ib_cleanup_multiport_master(dev);

err_free_port:
	kfree(dev->port);

	return err;
}
  4793. static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
  4794. {
  4795. dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
  4796. if (!dev->flow_db)
  4797. return -ENOMEM;
  4798. mutex_init(&dev->flow_db->lock);
  4799. return 0;
  4800. }
  4801. int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev)
  4802. {
  4803. struct mlx5_ib_dev *nic_dev;
  4804. nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch);
  4805. if (!nic_dev)
  4806. return -EINVAL;
  4807. dev->flow_db = nic_dev->flow_db;
  4808. return 0;
  4809. }
  4810. static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
  4811. {
  4812. kfree(dev->flow_db);
  4813. }
  4814. int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
  4815. {
  4816. struct mlx5_core_dev *mdev = dev->mdev;
  4817. int err;
  4818. dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
  4819. dev->ib_dev.uverbs_cmd_mask =
  4820. (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
  4821. (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
  4822. (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
  4823. (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
  4824. (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
  4825. (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
  4826. (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
  4827. (1ull << IB_USER_VERBS_CMD_REG_MR) |
  4828. (1ull << IB_USER_VERBS_CMD_REREG_MR) |
  4829. (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
  4830. (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
  4831. (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
  4832. (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
  4833. (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
  4834. (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
  4835. (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
  4836. (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
  4837. (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
  4838. (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
  4839. (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
  4840. (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
  4841. (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
  4842. (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
  4843. (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
  4844. (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
  4845. (1ull << IB_USER_VERBS_CMD_OPEN_QP);
  4846. dev->ib_dev.uverbs_ex_cmd_mask =
  4847. (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
  4848. (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
  4849. (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
  4850. (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
  4851. (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
  4852. dev->ib_dev.query_device = mlx5_ib_query_device;
  4853. dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
  4854. dev->ib_dev.query_gid = mlx5_ib_query_gid;
  4855. dev->ib_dev.add_gid = mlx5_ib_add_gid;
  4856. dev->ib_dev.del_gid = mlx5_ib_del_gid;
  4857. dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
  4858. dev->ib_dev.modify_device = mlx5_ib_modify_device;
  4859. dev->ib_dev.modify_port = mlx5_ib_modify_port;
  4860. dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
  4861. dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
  4862. dev->ib_dev.mmap = mlx5_ib_mmap;
  4863. dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
  4864. dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
  4865. dev->ib_dev.create_ah = mlx5_ib_create_ah;
  4866. dev->ib_dev.query_ah = mlx5_ib_query_ah;
  4867. dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
  4868. dev->ib_dev.create_srq = mlx5_ib_create_srq;
  4869. dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
  4870. dev->ib_dev.query_srq = mlx5_ib_query_srq;
  4871. dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
  4872. dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
  4873. dev->ib_dev.create_qp = mlx5_ib_create_qp;
  4874. dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
  4875. dev->ib_dev.query_qp = mlx5_ib_query_qp;
  4876. dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
  4877. dev->ib_dev.drain_sq = mlx5_ib_drain_sq;
  4878. dev->ib_dev.drain_rq = mlx5_ib_drain_rq;
  4879. dev->ib_dev.post_send = mlx5_ib_post_send;
  4880. dev->ib_dev.post_recv = mlx5_ib_post_recv;
  4881. dev->ib_dev.create_cq = mlx5_ib_create_cq;
  4882. dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
  4883. dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
  4884. dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
  4885. dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
  4886. dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
  4887. dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
  4888. dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
  4889. dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr;
  4890. dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
  4891. dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
  4892. dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
  4893. dev->ib_dev.process_mad = mlx5_ib_process_mad;
  4894. dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
  4895. dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
  4896. dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
  4897. dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
  4898. dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
  4899. if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
  4900. dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
  4901. if (mlx5_core_is_pf(mdev)) {
  4902. dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
  4903. dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
  4904. dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
  4905. dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
  4906. }
  4907. dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
  4908. dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
  4909. if (MLX5_CAP_GEN(mdev, imaicl)) {
  4910. dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
  4911. dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
  4912. dev->ib_dev.uverbs_cmd_mask |=
  4913. (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
  4914. (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
  4915. }
  4916. if (MLX5_CAP_GEN(mdev, xrc)) {
  4917. dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
  4918. dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
  4919. dev->ib_dev.uverbs_cmd_mask |=
  4920. (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
  4921. (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
  4922. }
  4923. if (MLX5_CAP_DEV_MEM(mdev, memic)) {
  4924. dev->ib_dev.alloc_dm = mlx5_ib_alloc_dm;
  4925. dev->ib_dev.dealloc_dm = mlx5_ib_dealloc_dm;
  4926. dev->ib_dev.reg_dm_mr = mlx5_ib_reg_dm_mr;
  4927. }
  4928. dev->ib_dev.create_flow = mlx5_ib_create_flow;
  4929. dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
  4930. dev->ib_dev.uverbs_ex_cmd_mask |=
  4931. (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
  4932. (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
  4933. dev->ib_dev.create_flow_action_esp = mlx5_ib_create_flow_action_esp;
  4934. dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action;
  4935. dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp;
  4936. dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
  4937. dev->ib_dev.create_counters = mlx5_ib_create_counters;
  4938. dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters;
  4939. dev->ib_dev.read_counters = mlx5_ib_read_counters;
  4940. err = init_node_data(dev);
  4941. if (err)
  4942. return err;
  4943. if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
  4944. (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
  4945. MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
  4946. mutex_init(&dev->lb_mutex);
  4947. return 0;
  4948. }
  4949. static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
  4950. {
  4951. dev->ib_dev.get_port_immutable = mlx5_port_immutable;
  4952. dev->ib_dev.query_port = mlx5_ib_query_port;
  4953. return 0;
  4954. }
  4955. int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
  4956. {
  4957. dev->ib_dev.get_port_immutable = mlx5_port_rep_immutable;
  4958. dev->ib_dev.query_port = mlx5_ib_rep_query_port;
  4959. return 0;
  4960. }
  4961. static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
  4962. {
  4963. u8 port_num;
  4964. int i;
  4965. for (i = 0; i < dev->num_ports; i++) {
  4966. dev->roce[i].dev = dev;
  4967. dev->roce[i].native_port_num = i + 1;
  4968. dev->roce[i].last_port_state = IB_PORT_DOWN;
  4969. }
  4970. dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
  4971. dev->ib_dev.create_wq = mlx5_ib_create_wq;
  4972. dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
  4973. dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
  4974. dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
  4975. dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
  4976. dev->ib_dev.uverbs_ex_cmd_mask |=
  4977. (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
  4978. (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
  4979. (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
  4980. (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
  4981. (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
  4982. port_num = mlx5_core_native_port_num(dev->mdev) - 1;
  4983. return mlx5_add_netdev_notifier(dev, port_num);
  4984. }
  4985. static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
  4986. {
  4987. u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
  4988. mlx5_remove_netdev_notifier(dev, port_num);
  4989. }
  4990. int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
  4991. {
  4992. struct mlx5_core_dev *mdev = dev->mdev;
  4993. enum rdma_link_layer ll;
  4994. int port_type_cap;
  4995. int err = 0;
  4996. port_type_cap = MLX5_CAP_GEN(mdev, port_type);
  4997. ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
  4998. if (ll == IB_LINK_LAYER_ETHERNET)
  4999. err = mlx5_ib_stage_common_roce_init(dev);
  5000. return err;
  5001. }
  5002. void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
  5003. {
  5004. mlx5_ib_stage_common_roce_cleanup(dev);
  5005. }
  5006. static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
  5007. {
  5008. struct mlx5_core_dev *mdev = dev->mdev;
  5009. enum rdma_link_layer ll;
  5010. int port_type_cap;
  5011. int err;
  5012. port_type_cap = MLX5_CAP_GEN(mdev, port_type);
  5013. ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
  5014. if (ll == IB_LINK_LAYER_ETHERNET) {
  5015. err = mlx5_ib_stage_common_roce_init(dev);
  5016. if (err)
  5017. return err;
  5018. err = mlx5_enable_eth(dev);
  5019. if (err)
  5020. goto cleanup;
  5021. }
  5022. return 0;
  5023. cleanup:
  5024. mlx5_ib_stage_common_roce_cleanup(dev);
  5025. return err;
  5026. }
  5027. static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
  5028. {
  5029. struct mlx5_core_dev *mdev = dev->mdev;
  5030. enum rdma_link_layer ll;
  5031. int port_type_cap;
  5032. port_type_cap = MLX5_CAP_GEN(mdev, port_type);
  5033. ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
  5034. if (ll == IB_LINK_LAYER_ETHERNET) {
  5035. mlx5_disable_eth(dev);
  5036. mlx5_ib_stage_common_roce_cleanup(dev);
  5037. }
  5038. }
  5039. int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
  5040. {
  5041. return create_dev_resources(&dev->devr);
  5042. }
  5043. void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
  5044. {
  5045. destroy_dev_resources(&dev->devr);
  5046. }
  5047. static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
  5048. {
  5049. mlx5_ib_internal_fill_odp_caps(dev);
  5050. return mlx5_ib_odp_init_one(dev);
  5051. }
  5052. int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
  5053. {
  5054. if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
  5055. dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
  5056. dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
  5057. return mlx5_ib_alloc_counters(dev);
  5058. }
  5059. return 0;
  5060. }
  5061. void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
  5062. {
  5063. if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
  5064. mlx5_ib_dealloc_counters(dev);
  5065. }
  5066. static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
  5067. {
  5068. return mlx5_ib_init_cong_debugfs(dev,
  5069. mlx5_core_native_port_num(dev->mdev) - 1);
  5070. }
  5071. static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
  5072. {
  5073. mlx5_ib_cleanup_cong_debugfs(dev,
  5074. mlx5_core_native_port_num(dev->mdev) - 1);
  5075. }
  5076. static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
  5077. {
  5078. dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
  5079. return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
  5080. }
  5081. static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
  5082. {
  5083. mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
  5084. }
int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
	if (err)
		return err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
	if (err)
		/* release the regular bfreg allocated above */
		mlx5_free_bfreg(dev->mdev, &dev->bfreg);

	return err;
}
void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
}
  5101. static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev)
  5102. {
  5103. return populate_specs_root(dev);
  5104. }
  5105. int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
  5106. {
  5107. return ib_register_device(&dev->ib_dev, NULL);
  5108. }
  5109. static void mlx5_ib_stage_depopulate_specs(struct mlx5_ib_dev *dev)
  5110. {
  5111. depopulate_specs_root(dev);
  5112. }
  5113. void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
  5114. {
  5115. destroy_umrc_res(dev);
  5116. }
  5117. void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
  5118. {
  5119. ib_unregister_device(&dev->ib_dev);
  5120. }
  5121. int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
  5122. {
  5123. return create_umr_res(dev);
  5124. }
  5125. static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
  5126. {
  5127. init_delay_drop(dev);
  5128. return 0;
  5129. }
  5130. static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
  5131. {
  5132. cancel_delay_drop(dev);
  5133. }
  5134. int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
  5135. {
  5136. int err;
  5137. int i;
  5138. for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
  5139. err = device_create_file(&dev->ib_dev.dev,
  5140. mlx5_class_attributes[i]);
  5141. if (err)
  5142. return err;
  5143. }
  5144. return 0;
  5145. }
  5146. static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
  5147. {
  5148. mlx5_ib_register_vport_reps(dev);
  5149. return 0;
  5150. }
  5151. static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev)
  5152. {
  5153. mlx5_ib_unregister_vport_reps(dev);
  5154. }
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage)
{
	/* Number of stages to cleanup */
	while (stage) {
		stage--;
		if (profile->stage[stage].cleanup)
			profile->stage[stage].cleanup(dev);
	}

	ib_dealloc_device((struct ib_device *)dev);
}

void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile)
{
	int err;
	int i;

	printk_once(KERN_INFO "%s", mlx5_version);

	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
		if (profile->stage[i].init) {
			err = profile->stage[i].init(dev);
			if (err)
				goto err_out;
		}
	}

	dev->profile = profile;
	dev->ib_active = true;

	return dev;

err_out:
	__mlx5_ib_remove(dev, profile, i);

	return NULL;
}
  5187. static const struct mlx5_ib_profile pf_profile = {
  5188. STAGE_CREATE(MLX5_IB_STAGE_INIT,
  5189. mlx5_ib_stage_init_init,
  5190. mlx5_ib_stage_init_cleanup),
  5191. STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
  5192. mlx5_ib_stage_flow_db_init,
  5193. mlx5_ib_stage_flow_db_cleanup),
  5194. STAGE_CREATE(MLX5_IB_STAGE_CAPS,
  5195. mlx5_ib_stage_caps_init,
  5196. NULL),
  5197. STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
  5198. mlx5_ib_stage_non_default_cb,
  5199. NULL),
  5200. STAGE_CREATE(MLX5_IB_STAGE_ROCE,
  5201. mlx5_ib_stage_roce_init,
  5202. mlx5_ib_stage_roce_cleanup),
  5203. STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
  5204. mlx5_ib_stage_dev_res_init,
  5205. mlx5_ib_stage_dev_res_cleanup),
  5206. STAGE_CREATE(MLX5_IB_STAGE_ODP,
  5207. mlx5_ib_stage_odp_init,
  5208. NULL),
  5209. STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
  5210. mlx5_ib_stage_counters_init,
  5211. mlx5_ib_stage_counters_cleanup),
  5212. STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
  5213. mlx5_ib_stage_cong_debugfs_init,
  5214. mlx5_ib_stage_cong_debugfs_cleanup),
  5215. STAGE_CREATE(MLX5_IB_STAGE_UAR,
  5216. mlx5_ib_stage_uar_init,
  5217. mlx5_ib_stage_uar_cleanup),
  5218. STAGE_CREATE(MLX5_IB_STAGE_BFREG,
  5219. mlx5_ib_stage_bfrag_init,
  5220. mlx5_ib_stage_bfrag_cleanup),
  5221. STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
  5222. NULL,
  5223. mlx5_ib_stage_pre_ib_reg_umr_cleanup),
  5224. STAGE_CREATE(MLX5_IB_STAGE_SPECS,
  5225. mlx5_ib_stage_populate_specs,
  5226. mlx5_ib_stage_depopulate_specs),
  5227. STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
  5228. mlx5_ib_stage_ib_reg_init,
  5229. mlx5_ib_stage_ib_reg_cleanup),
  5230. STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
  5231. mlx5_ib_stage_post_ib_reg_umr_init,
  5232. NULL),
  5233. STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
  5234. mlx5_ib_stage_delay_drop_init,
  5235. mlx5_ib_stage_delay_drop_cleanup),
  5236. STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
  5237. mlx5_ib_stage_class_attr_init,
  5238. NULL),
  5239. };
  5240. static const struct mlx5_ib_profile nic_rep_profile = {
  5241. STAGE_CREATE(MLX5_IB_STAGE_INIT,
  5242. mlx5_ib_stage_init_init,
  5243. mlx5_ib_stage_init_cleanup),
  5244. STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
  5245. mlx5_ib_stage_flow_db_init,
  5246. mlx5_ib_stage_flow_db_cleanup),
  5247. STAGE_CREATE(MLX5_IB_STAGE_CAPS,
  5248. mlx5_ib_stage_caps_init,
  5249. NULL),
  5250. STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
  5251. mlx5_ib_stage_rep_non_default_cb,
  5252. NULL),
  5253. STAGE_CREATE(MLX5_IB_STAGE_ROCE,
  5254. mlx5_ib_stage_rep_roce_init,
  5255. mlx5_ib_stage_rep_roce_cleanup),
  5256. STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
  5257. mlx5_ib_stage_dev_res_init,
  5258. mlx5_ib_stage_dev_res_cleanup),
  5259. STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
  5260. mlx5_ib_stage_counters_init,
  5261. mlx5_ib_stage_counters_cleanup),
  5262. STAGE_CREATE(MLX5_IB_STAGE_UAR,
  5263. mlx5_ib_stage_uar_init,
  5264. mlx5_ib_stage_uar_cleanup),
  5265. STAGE_CREATE(MLX5_IB_STAGE_BFREG,
  5266. mlx5_ib_stage_bfrag_init,
  5267. mlx5_ib_stage_bfrag_cleanup),
  5268. STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
  5269. NULL,
  5270. mlx5_ib_stage_pre_ib_reg_umr_cleanup),
  5271. STAGE_CREATE(MLX5_IB_STAGE_SPECS,
  5272. mlx5_ib_stage_populate_specs,
  5273. mlx5_ib_stage_depopulate_specs),
  5274. STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
  5275. mlx5_ib_stage_ib_reg_init,
  5276. mlx5_ib_stage_ib_reg_cleanup),
  5277. STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
  5278. mlx5_ib_stage_post_ib_reg_umr_init,
  5279. NULL),
  5280. STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
  5281. mlx5_ib_stage_class_attr_init,
  5282. NULL),
  5283. STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
  5284. mlx5_ib_stage_rep_reg_init,
  5285. mlx5_ib_stage_rep_reg_cleanup),
  5286. };
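
/*
 * On a multi-port HCA the secondary ("slave") mlx5_core device does not get
 * an ib_device of its own.  Instead it is represented by a multiport_info
 * entry that is bound to the master's ib_device when the system image GUIDs
 * match, or parked on the unaffiliated list until a master shows up.
 */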
static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_dev *dev;
	bool bound = false;
	int err;

	mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
	if (!mpi)
		return NULL;

	mpi->mdev = mdev;

	err = mlx5_query_nic_vport_system_image_guid(mdev,
						     &mpi->sys_image_guid);
	if (err) {
		kfree(mpi);
		return NULL;
	}

	mutex_lock(&mlx5_ib_multiport_mutex);
	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
		if (dev->sys_image_guid == mpi->sys_image_guid)
			bound = mlx5_ib_bind_slave_port(dev, mpi);

		if (bound) {
			rdma_roce_rescan_device(&dev->ib_dev);
			break;
		}
	}

	if (!bound) {
		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
		dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
	}
	mutex_unlock(&mlx5_ib_multiport_mutex);

	return mpi;
}
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	enum rdma_link_layer ll;
	struct mlx5_ib_dev *dev;
	int port_type_cap;

	printk_once(KERN_INFO "%s", mlx5_version);

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
		return mlx5_ib_add_slave_port(mdev);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;
	dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
			     MLX5_CAP_GEN(mdev, num_vhca_ports));

	if (MLX5_VPORT_MANAGER(mdev) &&
	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
		dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);

		return __mlx5_ib_add(dev, &nic_rep_profile);
	}

	return __mlx5_ib_add(dev, &pf_profile);
}

static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_dev *dev;

	if (mlx5_core_is_mp_slave(mdev)) {
		mpi = context;
		mutex_lock(&mlx5_ib_multiport_mutex);
		if (mpi->ibdev)
			mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
		list_del(&mpi->list);
		mutex_unlock(&mlx5_ib_multiport_mutex);
		return;
	}

	dev = context;
	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}

static struct mlx5_interface mlx5_ib_interface = {
	.add		= mlx5_ib_add,
	.remove		= mlx5_ib_remove,
	.event		= mlx5_ib_event,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	.pfault		= mlx5_ib_pfault,
#endif
	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
};
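
/*
 * The XLT emergency page is a single pre-allocated page the MR code can fall
 * back to when it cannot allocate a translation buffer; the mutex serializes
 * its users.
 */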
unsigned long mlx5_ib_get_xlt_emergency_page(void)
{
	mutex_lock(&xlt_emergency_page_mutex);
	return xlt_emergency_page;
}

void mlx5_ib_put_xlt_emergency_page(void)
{
	mutex_unlock(&xlt_emergency_page_mutex);
}

static int __init mlx5_ib_init(void)
{
	int err;

	xlt_emergency_page = __get_free_page(GFP_KERNEL);
	if (!xlt_emergency_page)
		return -ENOMEM;

	mutex_init(&xlt_emergency_page_mutex);

	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
	if (!mlx5_ib_event_wq) {
		free_page(xlt_emergency_page);
		return -ENOMEM;
	}

	mlx5_ib_odp_init();

	err = mlx5_register_interface(&mlx5_ib_interface);

	return err;
}

static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	destroy_workqueue(mlx5_ib_event_wq);
	mutex_destroy(&xlt_emergency_page_mutex);
	free_page(xlt_emergency_page);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);