/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)

struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
	u64 reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
	atomic_t ref_count;
	u32 qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8 sched_queue;
	__be32 param3;
	u8 vlan_control;
	u8 fvl_rx;
	u8 pri_path_fl;
	u8 vlan_index;
	u8 feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
	int qpn;
};

static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}
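
/*
 * The resource tracker keeps one red-black tree per resource type, keyed by
 * the 64-bit resource id.  Lookup walks the tree and returns the embedded
 * struct res_common, or NULL if the id is not tracked.
 */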
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}
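
/*
 * Insert a tracked resource into the per-type red-black tree.  Returns
 * -EEXIST if an entry with the same res_id is already present; otherwise
 * the node is linked and the tree is rebalanced.
 */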
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
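
/*
 * Check a slave's resource request against its quota and the shared free
 * pool, and account the grant.  Requests within the slave's guaranteed
 * share are charged to the reserved area; the remainder must fit in the
 * free pool without dipping below what is reserved for other functions.
 */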
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
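
/*
 * Return previously granted resources to the allocator, crediting the
 * reserved area for any portion that falls back under the slave's
 * guaranteed share.
 */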
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
	return;
}
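
/*
 * Set the quota and guaranteed count of one resource type for one function:
 * each function is guaranteed num_instances / (2 * (num_vfs + 1)) instances
 * and may use up to half of the instances plus its guarantee.  The PF
 * additionally absorbs the reserved MTTs into its own accounting.
 */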
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
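
/*
 * Allocate and initialize the master's resource tracker: per-slave resource
 * lists, per-type red-black trees, and the per-type quota, guaranteed and
 * allocated arrays (sized for the PF plus all VFs, and per port for MACs
 * and VLANs).
 */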
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->num_vfs + 1) * sizeof(int),
						       GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for
					 * both ports.
					 */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
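
/*
 * Replace the pkey index a slave placed in the QP context with the physical
 * index taken from the master's virtual-to-physical pkey table for the
 * port selected by the sched_queue field.
 */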
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
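
/*
 * Rewrite the GID index in a QP context so that a slave's zero-based view
 * maps onto its slice of the physical GID table: UD QPs get the slave's
 * base GID index (the slave number on IB ports), while for RC/UC/XRC QPs
 * the primary and alternate paths are adjusted only when the corresponding
 * optpar bit is set.
 */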
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
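
/*
 * Enforce the VF's operational vport state on a QP context before it is
 * handed to firmware: when a default VLAN (VST) is configured, force VLAN
 * insertion/stripping, the VLAN index and the configured QoS; when spoof
 * checking is enabled, force the source MAC index.
 */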
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
			}
		}

		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
	return 0;
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
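
/*
 * get_res() looks up a tracked resource, verifies that the calling slave
 * owns it and marks it busy so no other command can move it; put_res()
 * restores the state saved by get_res().  Both take the tracker lock.
 */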
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
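
/*
 * The alloc_*_tr() helpers allocate a tracker entry for a single resource
 * instance and set its initial state; alloc_tr() below dispatches on the
 * resource type.  Entries are later removed and freed via rem_res_range().
 */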
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
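
/*
 * Track a contiguous range of newly allocated resource ids for a slave:
 * allocate a tracker entry per id, insert each into the per-type tree and
 * append it to the slave's resource list.  On failure the entries already
 * inserted are unwound and everything is freed.
 */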
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
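
/*
 * Undo add_res_range(): verify that every id in the range exists, belongs
 * to the slave and is in a removable state, then unlink the entries from
 * the tree and the slave's list and free them.
 */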
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
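
/*
 * The *_res_start_move_to() helpers implement the per-resource-type state
 * machines: they check that the requested transition is legal from the
 * current state, then park the resource in its BUSY state while recording
 * from_state and to_state so the move can later be completed or rolled
 * back.
 */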
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
  1270. if (r->com.state != RES_SRQ_HW)
  1271. err = -EINVAL;
  1272. else if (atomic_read(&r->ref_count))
  1273. err = -EBUSY;
  1274. } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
  1275. err = -EINVAL;
  1276. }
  1277. if (!err) {
  1278. r->com.from_state = r->com.state;
  1279. r->com.to_state = state;
  1280. r->com.state = RES_SRQ_BUSY;
  1281. if (srq)
  1282. *srq = r;
  1283. }
  1284. spin_unlock_irq(mlx4_tlock(dev));
  1285. return err;
  1286. }
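/*
 * Roll back a transition begun by one of the *_res_start_move_to()
 * helpers: restore the state saved in from_state.
 */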
  1287. static void res_abort_move(struct mlx4_dev *dev, int slave,
  1288. enum mlx4_resource type, int id)
  1289. {
  1290. struct mlx4_priv *priv = mlx4_priv(dev);
  1291. struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
  1292. struct res_common *r;
  1293. spin_lock_irq(mlx4_tlock(dev));
  1294. r = res_tracker_lookup(&tracker->res_tree[type], id);
  1295. if (r && (r->owner == slave))
  1296. r->state = r->from_state;
  1297. spin_unlock_irq(mlx4_tlock(dev));
  1298. }
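/* Commit a started transition: latch the requested to_state. */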
  1299. static void res_end_move(struct mlx4_dev *dev, int slave,
  1300. enum mlx4_resource type, int id)
  1301. {
  1302. struct mlx4_priv *priv = mlx4_priv(dev);
  1303. struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
  1304. struct res_common *r;
  1305. spin_lock_irq(mlx4_tlock(dev));
  1306. r = res_tracker_lookup(&tracker->res_tree[type], id);
  1307. if (r && (r->owner == slave))
  1308. r->state = r->to_state;
  1309. spin_unlock_irq(mlx4_tlock(dev));
  1310. }
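/*
 * valid_reserved() - the QP lies in the device's reserved range and is
 * a proxy QP this slave may legitimately use (or the caller is the
 * master).  fw_reserved() - the QP belongs to the firmware-reserved
 * region, so its ICM is never allocated or freed on a slave's behalf.
 */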
  1311. static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
  1312. {
  1313. return mlx4_is_qp_reserved(dev, qpn) &&
  1314. (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
  1315. }
  1316. static int fw_reserved(struct mlx4_dev *dev, int qpn)
  1317. {
  1318. return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
  1319. }
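/*
 * ALLOC_RES handler for QPs: RES_OP_RESERVE charges the slave's quota
 * and reserves a QP range; RES_OP_MAP_ICM maps ICM for a single QP,
 * skipping QPs already reserved by firmware.
 */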
  1320. static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1321. u64 in_param, u64 *out_param)
  1322. {
  1323. int err;
  1324. int count;
  1325. int align;
  1326. int base;
  1327. int qpn;
  1328. u8 flags;
  1329. switch (op) {
  1330. case RES_OP_RESERVE:
  1331. count = get_param_l(&in_param) & 0xffffff;
  1332. /* Turn off all unsupported QP allocation flags that the
  1333. * slave tries to set.
  1334. */
  1335. flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
  1336. align = get_param_h(&in_param);
  1337. err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
  1338. if (err)
  1339. return err;
  1340. err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
  1341. if (err) {
  1342. mlx4_release_resource(dev, slave, RES_QP, count, 0);
  1343. return err;
  1344. }
  1345. err = add_res_range(dev, slave, base, count, RES_QP, 0);
  1346. if (err) {
  1347. mlx4_release_resource(dev, slave, RES_QP, count, 0);
  1348. __mlx4_qp_release_range(dev, base, count);
  1349. return err;
  1350. }
  1351. set_param_l(out_param, base);
  1352. break;
  1353. case RES_OP_MAP_ICM:
  1354. qpn = get_param_l(&in_param) & 0x7fffff;
  1355. if (valid_reserved(dev, slave, qpn)) {
  1356. err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
  1357. if (err)
  1358. return err;
  1359. }
  1360. err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
  1361. NULL, 1);
  1362. if (err)
  1363. return err;
  1364. if (!fw_reserved(dev, qpn)) {
  1365. err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
  1366. if (err) {
  1367. res_abort_move(dev, slave, RES_QP, qpn);
  1368. return err;
  1369. }
  1370. }
  1371. res_end_move(dev, slave, RES_QP, qpn);
  1372. break;
  1373. default:
  1374. err = -EINVAL;
  1375. break;
  1376. }
  1377. return err;
  1378. }
  1379. static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1380. u64 in_param, u64 *out_param)
  1381. {
  1382. int err = -EINVAL;
  1383. int base;
  1384. int order;
  1385. if (op != RES_OP_RESERVE_AND_MAP)
  1386. return err;
  1387. order = get_param_l(&in_param);
  1388. err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
  1389. if (err)
  1390. return err;
  1391. base = __mlx4_alloc_mtt_range(dev, order);
  1392. if (base == -1) {
  1393. mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
  1394. return -ENOMEM;
  1395. }
  1396. err = add_res_range(dev, slave, base, 1, RES_MTT, order);
  1397. if (err) {
  1398. mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
  1399. __mlx4_free_mtt_range(dev, base, order);
  1400. } else {
  1401. set_param_l(out_param, base);
  1402. }
  1403. return err;
  1404. }
  1405. static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1406. u64 in_param, u64 *out_param)
  1407. {
  1408. int err = -EINVAL;
  1409. int index;
  1410. int id;
  1411. struct res_mpt *mpt;
  1412. switch (op) {
  1413. case RES_OP_RESERVE:
  1414. err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
  1415. if (err)
  1416. break;
  1417. index = __mlx4_mpt_reserve(dev);
  1418. if (index == -1) {
  1419. mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
  1420. break;
  1421. }
  1422. id = index & mpt_mask(dev);
  1423. err = add_res_range(dev, slave, id, 1, RES_MPT, index);
  1424. if (err) {
  1425. mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
  1426. __mlx4_mpt_release(dev, index);
  1427. break;
  1428. }
  1429. set_param_l(out_param, index);
  1430. break;
  1431. case RES_OP_MAP_ICM:
  1432. index = get_param_l(&in_param);
  1433. id = index & mpt_mask(dev);
  1434. err = mr_res_start_move_to(dev, slave, id,
  1435. RES_MPT_MAPPED, &mpt);
  1436. if (err)
  1437. return err;
  1438. err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
  1439. if (err) {
  1440. res_abort_move(dev, slave, RES_MPT, id);
  1441. return err;
  1442. }
  1443. res_end_move(dev, slave, RES_MPT, id);
  1444. break;
  1445. }
  1446. return err;
  1447. }
  1448. static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1449. u64 in_param, u64 *out_param)
  1450. {
  1451. int cqn;
  1452. int err;
  1453. switch (op) {
  1454. case RES_OP_RESERVE_AND_MAP:
  1455. err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
  1456. if (err)
  1457. break;
  1458. err = __mlx4_cq_alloc_icm(dev, &cqn);
  1459. if (err) {
  1460. mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
  1461. break;
  1462. }
  1463. err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
  1464. if (err) {
  1465. mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
  1466. __mlx4_cq_free_icm(dev, cqn);
  1467. break;
  1468. }
  1469. set_param_l(out_param, cqn);
  1470. break;
  1471. default:
  1472. err = -EINVAL;
  1473. }
  1474. return err;
  1475. }
  1476. static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1477. u64 in_param, u64 *out_param)
  1478. {
  1479. int srqn;
  1480. int err;
  1481. switch (op) {
  1482. case RES_OP_RESERVE_AND_MAP:
  1483. err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
  1484. if (err)
  1485. break;
  1486. err = __mlx4_srq_alloc_icm(dev, &srqn);
  1487. if (err) {
  1488. mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
  1489. break;
  1490. }
  1491. err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
  1492. if (err) {
  1493. mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
  1494. __mlx4_srq_free_icm(dev, srqn);
  1495. break;
  1496. }
  1497. set_param_l(out_param, srqn);
  1498. break;
  1499. default:
  1500. err = -EINVAL;
  1501. }
  1502. return err;
  1503. }
  1504. static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
  1505. u8 smac_index, u64 *mac)
  1506. {
  1507. struct mlx4_priv *priv = mlx4_priv(dev);
  1508. struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
  1509. struct list_head *mac_list =
  1510. &tracker->slave_list[slave].res_list[RES_MAC];
  1511. struct mac_res *res, *tmp;
  1512. list_for_each_entry_safe(res, tmp, mac_list, list) {
  1513. if (res->smac_index == smac_index && res->port == (u8) port) {
  1514. *mac = res->mac;
  1515. return 0;
  1516. }
  1517. }
  1518. return -ENOENT;
  1519. }
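/*
 * Track a MAC registered on behalf of @slave.  If the MAC is already
 * listed for this port, only its reference count is bumped; otherwise
 * one quota unit is charged and a new entry is appended to the slave's
 * RES_MAC list.
 */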
  1520. static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
  1521. {
  1522. struct mlx4_priv *priv = mlx4_priv(dev);
  1523. struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
  1524. struct list_head *mac_list =
  1525. &tracker->slave_list[slave].res_list[RES_MAC];
  1526. struct mac_res *res, *tmp;
  1527. list_for_each_entry_safe(res, tmp, mac_list, list) {
  1528. if (res->mac == mac && res->port == (u8) port) {
  1529. /* mac found. update ref count */
  1530. ++res->ref_count;
  1531. return 0;
  1532. }
  1533. }
  1534. if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
  1535. return -EINVAL;
1536. res = kzalloc(sizeof(*res), GFP_KERNEL);
  1537. if (!res) {
  1538. mlx4_release_resource(dev, slave, RES_MAC, 1, port);
  1539. return -ENOMEM;
  1540. }
  1541. res->mac = mac;
  1542. res->port = (u8) port;
  1543. res->smac_index = smac_index;
  1544. res->ref_count = 1;
  1545. list_add_tail(&res->list,
  1546. &tracker->slave_list[slave].res_list[RES_MAC]);
  1547. return 0;
  1548. }
  1549. static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
  1550. int port)
  1551. {
  1552. struct mlx4_priv *priv = mlx4_priv(dev);
  1553. struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
  1554. struct list_head *mac_list =
  1555. &tracker->slave_list[slave].res_list[RES_MAC];
  1556. struct mac_res *res, *tmp;
  1557. list_for_each_entry_safe(res, tmp, mac_list, list) {
  1558. if (res->mac == mac && res->port == (u8) port) {
  1559. if (!--res->ref_count) {
  1560. list_del(&res->list);
  1561. mlx4_release_resource(dev, slave, RES_MAC, 1, port);
  1562. kfree(res);
  1563. }
  1564. break;
  1565. }
  1566. }
  1567. }
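/*
 * Drop every MAC still tracked for @slave, unregistering each one as
 * many times as the slave referenced it and returning the quota.
 */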
  1568. static void rem_slave_macs(struct mlx4_dev *dev, int slave)
  1569. {
  1570. struct mlx4_priv *priv = mlx4_priv(dev);
  1571. struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
  1572. struct list_head *mac_list =
  1573. &tracker->slave_list[slave].res_list[RES_MAC];
  1574. struct mac_res *res, *tmp;
  1575. int i;
  1576. list_for_each_entry_safe(res, tmp, mac_list, list) {
  1577. list_del(&res->list);
1578. /* unregister the MAC as many times as the slave referenced it */
  1579. for (i = 0; i < res->ref_count; i++)
  1580. __mlx4_unregister_mac(dev, res->port, res->mac);
  1581. mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
  1582. kfree(res);
  1583. }
  1584. }
  1585. static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1586. u64 in_param, u64 *out_param, int in_port)
  1587. {
  1588. int err = -EINVAL;
  1589. int port;
  1590. u64 mac;
  1591. u8 smac_index;
  1592. if (op != RES_OP_RESERVE_AND_MAP)
  1593. return err;
  1594. port = !in_port ? get_param_l(out_param) : in_port;
1595. port = mlx4_slave_convert_port(dev, slave, port);
  1597. if (port < 0)
  1598. return -EINVAL;
  1599. mac = in_param;
  1600. err = __mlx4_register_mac(dev, port, mac);
  1601. if (err >= 0) {
  1602. smac_index = err;
  1603. set_param_l(out_param, err);
  1604. err = 0;
  1605. }
  1606. if (!err) {
  1607. err = mac_add_to_slave(dev, slave, mac, port, smac_index);
  1608. if (err)
  1609. __mlx4_unregister_mac(dev, port, mac);
  1610. }
  1611. return err;
  1612. }
  1613. static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
  1614. int port, int vlan_index)
  1615. {
  1616. struct mlx4_priv *priv = mlx4_priv(dev);
  1617. struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
  1618. struct list_head *vlan_list =
  1619. &tracker->slave_list[slave].res_list[RES_VLAN];
  1620. struct vlan_res *res, *tmp;
  1621. list_for_each_entry_safe(res, tmp, vlan_list, list) {
  1622. if (res->vlan == vlan && res->port == (u8) port) {
  1623. /* vlan found. update ref count */
  1624. ++res->ref_count;
  1625. return 0;
  1626. }
  1627. }
  1628. if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
  1629. return -EINVAL;
  1630. res = kzalloc(sizeof(*res), GFP_KERNEL);
  1631. if (!res) {
  1632. mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
  1633. return -ENOMEM;
  1634. }
  1635. res->vlan = vlan;
  1636. res->port = (u8) port;
  1637. res->vlan_index = vlan_index;
  1638. res->ref_count = 1;
  1639. list_add_tail(&res->list,
  1640. &tracker->slave_list[slave].res_list[RES_VLAN]);
  1641. return 0;
  1642. }
  1643. static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
  1644. int port)
  1645. {
  1646. struct mlx4_priv *priv = mlx4_priv(dev);
  1647. struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
  1648. struct list_head *vlan_list =
  1649. &tracker->slave_list[slave].res_list[RES_VLAN];
  1650. struct vlan_res *res, *tmp;
  1651. list_for_each_entry_safe(res, tmp, vlan_list, list) {
  1652. if (res->vlan == vlan && res->port == (u8) port) {
  1653. if (!--res->ref_count) {
  1654. list_del(&res->list);
  1655. mlx4_release_resource(dev, slave, RES_VLAN,
  1656. 1, port);
  1657. kfree(res);
  1658. }
  1659. break;
  1660. }
  1661. }
  1662. }
  1663. static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
  1664. {
  1665. struct mlx4_priv *priv = mlx4_priv(dev);
  1666. struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
  1667. struct list_head *vlan_list =
  1668. &tracker->slave_list[slave].res_list[RES_VLAN];
  1669. struct vlan_res *res, *tmp;
  1670. int i;
  1671. list_for_each_entry_safe(res, tmp, vlan_list, list) {
  1672. list_del(&res->list);
1673. /* unregister the VLAN as many times as the slave referenced it */
  1674. for (i = 0; i < res->ref_count; i++)
  1675. __mlx4_unregister_vlan(dev, res->port, res->vlan);
  1676. mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
  1677. kfree(res);
  1678. }
  1679. }
  1680. static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1681. u64 in_param, u64 *out_param, int in_port)
  1682. {
  1683. struct mlx4_priv *priv = mlx4_priv(dev);
  1684. struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
  1685. int err;
  1686. u16 vlan;
  1687. int vlan_index;
  1688. int port;
  1689. port = !in_port ? get_param_l(out_param) : in_port;
  1690. if (!port || op != RES_OP_RESERVE_AND_MAP)
  1691. return -EINVAL;
1692. port = mlx4_slave_convert_port(dev, slave, port);
  1694. if (port < 0)
  1695. return -EINVAL;
1696. /* Older upstream kernels treated vlan reg/unreg as a NOP; keep that behaviour for slaves using the old API. */
  1697. if (!in_port && port > 0 && port <= dev->caps.num_ports) {
  1698. slave_state[slave].old_vlan_api = true;
  1699. return 0;
  1700. }
  1701. vlan = (u16) in_param;
  1702. err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
  1703. if (!err) {
  1704. set_param_l(out_param, (u32) vlan_index);
  1705. err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
  1706. if (err)
  1707. __mlx4_unregister_vlan(dev, port, vlan);
  1708. }
  1709. return err;
  1710. }
  1711. static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1712. u64 in_param, u64 *out_param)
  1713. {
  1714. u32 index;
  1715. int err;
  1716. if (op != RES_OP_RESERVE)
  1717. return -EINVAL;
  1718. err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
  1719. if (err)
  1720. return err;
  1721. err = __mlx4_counter_alloc(dev, &index);
  1722. if (err) {
  1723. mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
  1724. return err;
  1725. }
  1726. err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
  1727. if (err) {
  1728. __mlx4_counter_free(dev, index);
  1729. mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
  1730. } else {
  1731. set_param_l(out_param, index);
  1732. }
  1733. return err;
  1734. }
  1735. static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1736. u64 in_param, u64 *out_param)
  1737. {
  1738. u32 xrcdn;
  1739. int err;
  1740. if (op != RES_OP_RESERVE)
  1741. return -EINVAL;
  1742. err = __mlx4_xrcd_alloc(dev, &xrcdn);
  1743. if (err)
  1744. return err;
  1745. err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
  1746. if (err)
  1747. __mlx4_xrcd_free(dev, xrcdn);
  1748. else
  1749. set_param_l(out_param, xrcdn);
  1750. return err;
  1751. }
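/*
 * Dispatch a slave's ALLOC_RES command to the per-resource-type
 * allocation helper selected by the low byte of the in_modifier.
 */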
  1752. int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
  1753. struct mlx4_vhcr *vhcr,
  1754. struct mlx4_cmd_mailbox *inbox,
  1755. struct mlx4_cmd_mailbox *outbox,
  1756. struct mlx4_cmd_info *cmd)
  1757. {
  1758. int err;
  1759. int alop = vhcr->op_modifier;
  1760. switch (vhcr->in_modifier & 0xFF) {
  1761. case RES_QP:
  1762. err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
  1763. vhcr->in_param, &vhcr->out_param);
  1764. break;
  1765. case RES_MTT:
  1766. err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
  1767. vhcr->in_param, &vhcr->out_param);
  1768. break;
  1769. case RES_MPT:
  1770. err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
  1771. vhcr->in_param, &vhcr->out_param);
  1772. break;
  1773. case RES_CQ:
  1774. err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
  1775. vhcr->in_param, &vhcr->out_param);
  1776. break;
  1777. case RES_SRQ:
  1778. err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
  1779. vhcr->in_param, &vhcr->out_param);
  1780. break;
  1781. case RES_MAC:
  1782. err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
  1783. vhcr->in_param, &vhcr->out_param,
  1784. (vhcr->in_modifier >> 8) & 0xFF);
  1785. break;
  1786. case RES_VLAN:
  1787. err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
  1788. vhcr->in_param, &vhcr->out_param,
  1789. (vhcr->in_modifier >> 8) & 0xFF);
  1790. break;
  1791. case RES_COUNTER:
  1792. err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
  1793. vhcr->in_param, &vhcr->out_param);
  1794. break;
  1795. case RES_XRCD:
  1796. err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
  1797. vhcr->in_param, &vhcr->out_param);
  1798. break;
  1799. default:
  1800. err = -EINVAL;
  1801. break;
  1802. }
  1803. return err;
  1804. }
  1805. static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1806. u64 in_param)
  1807. {
  1808. int err;
  1809. int count;
  1810. int base;
  1811. int qpn;
  1812. switch (op) {
  1813. case RES_OP_RESERVE:
  1814. base = get_param_l(&in_param) & 0x7fffff;
  1815. count = get_param_h(&in_param);
  1816. err = rem_res_range(dev, slave, base, count, RES_QP, 0);
  1817. if (err)
  1818. break;
  1819. mlx4_release_resource(dev, slave, RES_QP, count, 0);
  1820. __mlx4_qp_release_range(dev, base, count);
  1821. break;
  1822. case RES_OP_MAP_ICM:
  1823. qpn = get_param_l(&in_param) & 0x7fffff;
  1824. err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
  1825. NULL, 0);
  1826. if (err)
  1827. return err;
  1828. if (!fw_reserved(dev, qpn))
  1829. __mlx4_qp_free_icm(dev, qpn);
  1830. res_end_move(dev, slave, RES_QP, qpn);
  1831. if (valid_reserved(dev, slave, qpn))
  1832. err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
  1833. break;
  1834. default:
  1835. err = -EINVAL;
  1836. break;
  1837. }
  1838. return err;
  1839. }
  1840. static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1841. u64 in_param, u64 *out_param)
  1842. {
  1843. int err = -EINVAL;
  1844. int base;
  1845. int order;
  1846. if (op != RES_OP_RESERVE_AND_MAP)
  1847. return err;
  1848. base = get_param_l(&in_param);
  1849. order = get_param_h(&in_param);
  1850. err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
  1851. if (!err) {
  1852. mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
  1853. __mlx4_free_mtt_range(dev, base, order);
  1854. }
  1855. return err;
  1856. }
  1857. static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1858. u64 in_param)
  1859. {
  1860. int err = -EINVAL;
  1861. int index;
  1862. int id;
  1863. struct res_mpt *mpt;
  1864. switch (op) {
  1865. case RES_OP_RESERVE:
  1866. index = get_param_l(&in_param);
  1867. id = index & mpt_mask(dev);
  1868. err = get_res(dev, slave, id, RES_MPT, &mpt);
  1869. if (err)
  1870. break;
  1871. index = mpt->key;
  1872. put_res(dev, slave, id, RES_MPT);
  1873. err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
  1874. if (err)
  1875. break;
  1876. mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
  1877. __mlx4_mpt_release(dev, index);
  1878. break;
  1879. case RES_OP_MAP_ICM:
  1880. index = get_param_l(&in_param);
  1881. id = index & mpt_mask(dev);
  1882. err = mr_res_start_move_to(dev, slave, id,
  1883. RES_MPT_RESERVED, &mpt);
  1884. if (err)
  1885. return err;
  1886. __mlx4_mpt_free_icm(dev, mpt->key);
  1887. res_end_move(dev, slave, RES_MPT, id);
1888. return err;
  1890. default:
  1891. err = -EINVAL;
  1892. break;
  1893. }
  1894. return err;
  1895. }
  1896. static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1897. u64 in_param, u64 *out_param)
  1898. {
  1899. int cqn;
  1900. int err;
  1901. switch (op) {
  1902. case RES_OP_RESERVE_AND_MAP:
  1903. cqn = get_param_l(&in_param);
  1904. err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
  1905. if (err)
  1906. break;
  1907. mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
  1908. __mlx4_cq_free_icm(dev, cqn);
  1909. break;
  1910. default:
  1911. err = -EINVAL;
  1912. break;
  1913. }
  1914. return err;
  1915. }
  1916. static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1917. u64 in_param, u64 *out_param)
  1918. {
  1919. int srqn;
  1920. int err;
  1921. switch (op) {
  1922. case RES_OP_RESERVE_AND_MAP:
  1923. srqn = get_param_l(&in_param);
  1924. err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
  1925. if (err)
  1926. break;
  1927. mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
  1928. __mlx4_srq_free_icm(dev, srqn);
  1929. break;
  1930. default:
  1931. err = -EINVAL;
  1932. break;
  1933. }
  1934. return err;
  1935. }
  1936. static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1937. u64 in_param, u64 *out_param, int in_port)
  1938. {
  1939. int port;
  1940. int err = 0;
  1941. switch (op) {
  1942. case RES_OP_RESERVE_AND_MAP:
  1943. port = !in_port ? get_param_l(out_param) : in_port;
1944. port = mlx4_slave_convert_port(dev, slave, port);
  1946. if (port < 0)
  1947. return -EINVAL;
  1948. mac_del_from_slave(dev, slave, in_param, port);
  1949. __mlx4_unregister_mac(dev, port, in_param);
  1950. break;
  1951. default:
  1952. err = -EINVAL;
  1953. break;
  1954. }
  1955. return err;
  1956. }
  1957. static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1958. u64 in_param, u64 *out_param, int port)
  1959. {
  1960. struct mlx4_priv *priv = mlx4_priv(dev);
  1961. struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
  1962. int err = 0;
1963. port = mlx4_slave_convert_port(dev, slave, port);
  1965. if (port < 0)
  1966. return -EINVAL;
  1967. switch (op) {
  1968. case RES_OP_RESERVE_AND_MAP:
  1969. if (slave_state[slave].old_vlan_api)
  1970. return 0;
  1971. if (!port)
  1972. return -EINVAL;
  1973. vlan_del_from_slave(dev, slave, in_param, port);
  1974. __mlx4_unregister_vlan(dev, port, in_param);
  1975. break;
  1976. default:
  1977. err = -EINVAL;
  1978. break;
  1979. }
  1980. return err;
  1981. }
  1982. static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1983. u64 in_param, u64 *out_param)
  1984. {
  1985. int index;
  1986. int err;
  1987. if (op != RES_OP_RESERVE)
  1988. return -EINVAL;
  1989. index = get_param_l(&in_param);
  1990. err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
  1991. if (err)
  1992. return err;
  1993. __mlx4_counter_free(dev, index);
  1994. mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
  1995. return err;
  1996. }
  1997. static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1998. u64 in_param, u64 *out_param)
  1999. {
  2000. int xrcdn;
  2001. int err;
  2002. if (op != RES_OP_RESERVE)
  2003. return -EINVAL;
  2004. xrcdn = get_param_l(&in_param);
  2005. err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
  2006. if (err)
  2007. return err;
  2008. __mlx4_xrcd_free(dev, xrcdn);
  2009. return err;
  2010. }
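/*
 * Dispatch a slave's FREE_RES command to the matching per-resource-type
 * free helper; the counterpart of mlx4_ALLOC_RES_wrapper().
 */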
  2011. int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
  2012. struct mlx4_vhcr *vhcr,
  2013. struct mlx4_cmd_mailbox *inbox,
  2014. struct mlx4_cmd_mailbox *outbox,
  2015. struct mlx4_cmd_info *cmd)
  2016. {
  2017. int err = -EINVAL;
  2018. int alop = vhcr->op_modifier;
  2019. switch (vhcr->in_modifier & 0xFF) {
  2020. case RES_QP:
  2021. err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
  2022. vhcr->in_param);
  2023. break;
  2024. case RES_MTT:
  2025. err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
  2026. vhcr->in_param, &vhcr->out_param);
  2027. break;
  2028. case RES_MPT:
  2029. err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
  2030. vhcr->in_param);
  2031. break;
  2032. case RES_CQ:
  2033. err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
  2034. vhcr->in_param, &vhcr->out_param);
  2035. break;
  2036. case RES_SRQ:
  2037. err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
  2038. vhcr->in_param, &vhcr->out_param);
  2039. break;
  2040. case RES_MAC:
  2041. err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
  2042. vhcr->in_param, &vhcr->out_param,
  2043. (vhcr->in_modifier >> 8) & 0xFF);
  2044. break;
  2045. case RES_VLAN:
  2046. err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
  2047. vhcr->in_param, &vhcr->out_param,
  2048. (vhcr->in_modifier >> 8) & 0xFF);
  2049. break;
  2050. case RES_COUNTER:
  2051. err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
  2052. vhcr->in_param, &vhcr->out_param);
  2053. break;
  2054. case RES_XRCD:
  2055. err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2056. vhcr->in_param, &vhcr->out_param);
break;
  2057. default:
  2058. break;
  2059. }
  2060. return err;
  2061. }
  2062. /* ugly but other choices are uglier */
  2063. static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
  2064. {
  2065. return (be32_to_cpu(mpt->flags) >> 9) & 1;
  2066. }
  2067. static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
  2068. {
  2069. return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
  2070. }
  2071. static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
  2072. {
  2073. return be32_to_cpu(mpt->mtt_sz);
  2074. }
  2075. static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
  2076. {
  2077. return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
  2078. }
  2079. static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
  2080. {
  2081. return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
  2082. }
  2083. static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
  2084. {
  2085. return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
  2086. }
  2087. static int mr_is_region(struct mlx4_mpt_entry *mpt)
  2088. {
  2089. return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
  2090. }
  2091. static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
  2092. {
  2093. return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
  2094. }
  2095. static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
  2096. {
  2097. return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
  2098. }
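/*
 * Work out how many MTT pages a QP context needs from its SQ/RQ sizes,
 * strides, page size and page offset, rounded up to a power of two.
 * QPs attached to an SRQ, RSS QPs and XRC QPs carry no RQ memory.
 */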
  2099. static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
  2100. {
  2101. int page_shift = (qpc->log_page_size & 0x3f) + 12;
  2102. int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2103. int log_sq_stride = qpc->sq_size_stride & 7;
  2104. int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
  2105. int log_rq_stride = qpc->rq_size_stride & 7;
  2106. int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
  2107. int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
  2108. u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
  2109. int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
  2110. int sq_size;
  2111. int rq_size;
  2112. int total_pages;
  2113. int total_mem;
  2114. int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2115. sq_size = 1 << (log_sq_size + log_sq_stride + 4);
  2116. rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
  2117. total_mem = sq_size + rq_size;
  2118. total_pages =
  2119. roundup_pow_of_two((total_mem + (page_offset << 6)) >>
  2120. page_shift);
  2121. return total_pages;
  2122. }
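/* Verify that [start, start + size) lies inside the MTT reservation. */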
  2123. static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
  2124. int size, struct res_mtt *mtt)
  2125. {
  2126. int res_start = mtt->com.res_id;
  2127. int res_size = (1 << mtt->order);
  2128. if (start < res_start || start + size > res_start + res_size)
  2129. return -EPERM;
  2130. return 0;
  2131. }
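/*
 * SW2HW_MPT on behalf of a slave: move the MPT to RES_MPT_HW, reject
 * memory windows, PDs that encode another slave, and FMRs with bind
 * enabled, validate the referenced MTT range, and only then pass the
 * command on to firmware.
 */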
  2132. int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
  2133. struct mlx4_vhcr *vhcr,
  2134. struct mlx4_cmd_mailbox *inbox,
  2135. struct mlx4_cmd_mailbox *outbox,
  2136. struct mlx4_cmd_info *cmd)
  2137. {
  2138. int err;
  2139. int index = vhcr->in_modifier;
  2140. struct res_mtt *mtt;
  2141. struct res_mpt *mpt;
  2142. int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
  2143. int phys;
  2144. int id;
  2145. u32 pd;
  2146. int pd_slave;
  2147. id = index & mpt_mask(dev);
  2148. err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
  2149. if (err)
  2150. return err;
  2151. /* Disable memory windows for VFs. */
  2152. if (!mr_is_region(inbox->buf)) {
  2153. err = -EPERM;
  2154. goto ex_abort;
  2155. }
2156. /* The PD bits that encode a slave id must be zero or match this slave. */
  2157. pd = mr_get_pd(inbox->buf);
  2158. pd_slave = (pd >> 17) & 0x7f;
  2159. if (pd_slave != 0 && pd_slave != slave) {
  2160. err = -EPERM;
  2161. goto ex_abort;
  2162. }
  2163. if (mr_is_fmr(inbox->buf)) {
  2164. /* FMR and Bind Enable are forbidden in slave devices. */
  2165. if (mr_is_bind_enabled(inbox->buf)) {
  2166. err = -EPERM;
  2167. goto ex_abort;
  2168. }
  2169. /* FMR and Memory Windows are also forbidden. */
  2170. if (!mr_is_region(inbox->buf)) {
  2171. err = -EPERM;
  2172. goto ex_abort;
  2173. }
  2174. }
  2175. phys = mr_phys_mpt(inbox->buf);
  2176. if (!phys) {
  2177. err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
  2178. if (err)
  2179. goto ex_abort;
  2180. err = check_mtt_range(dev, slave, mtt_base,
  2181. mr_get_mtt_size(inbox->buf), mtt);
  2182. if (err)
  2183. goto ex_put;
  2184. mpt->mtt = mtt;
  2185. }
  2186. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2187. if (err)
  2188. goto ex_put;
  2189. if (!phys) {
  2190. atomic_inc(&mtt->ref_count);
  2191. put_res(dev, slave, mtt->com.res_id, RES_MTT);
  2192. }
  2193. res_end_move(dev, slave, RES_MPT, id);
  2194. return 0;
  2195. ex_put:
  2196. if (!phys)
  2197. put_res(dev, slave, mtt->com.res_id, RES_MTT);
  2198. ex_abort:
  2199. res_abort_move(dev, slave, RES_MPT, id);
  2200. return err;
  2201. }
  2202. int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
  2203. struct mlx4_vhcr *vhcr,
  2204. struct mlx4_cmd_mailbox *inbox,
  2205. struct mlx4_cmd_mailbox *outbox,
  2206. struct mlx4_cmd_info *cmd)
  2207. {
  2208. int err;
  2209. int index = vhcr->in_modifier;
  2210. struct res_mpt *mpt;
  2211. int id;
  2212. id = index & mpt_mask(dev);
  2213. err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
  2214. if (err)
  2215. return err;
  2216. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2217. if (err)
  2218. goto ex_abort;
  2219. if (mpt->mtt)
  2220. atomic_dec(&mpt->mtt->ref_count);
  2221. res_end_move(dev, slave, RES_MPT, id);
  2222. return 0;
  2223. ex_abort:
  2224. res_abort_move(dev, slave, RES_MPT, id);
  2225. return err;
  2226. }
  2227. int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
  2228. struct mlx4_vhcr *vhcr,
  2229. struct mlx4_cmd_mailbox *inbox,
  2230. struct mlx4_cmd_mailbox *outbox,
  2231. struct mlx4_cmd_info *cmd)
  2232. {
  2233. int err;
  2234. int index = vhcr->in_modifier;
  2235. struct res_mpt *mpt;
  2236. int id;
  2237. id = index & mpt_mask(dev);
  2238. err = get_res(dev, slave, id, RES_MPT, &mpt);
  2239. if (err)
  2240. return err;
  2241. if (mpt->com.from_state == RES_MPT_MAPPED) {
  2242. /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
  2243. * that, the VF must read the MPT. But since the MPT entry memory is not
  2244. * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
  2245. * entry contents. To guarantee that the MPT cannot be changed, the driver
  2246. * must perform HW2SW_MPT before this query and return the MPT entry to HW
2247. * ownership following the change. The change here allows the VF to
  2248. * perform QUERY_MPT also when the entry is in SW ownership.
  2249. */
  2250. struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
  2251. &mlx4_priv(dev)->mr_table.dmpt_table,
  2252. mpt->key, NULL);
2253. if (!mpt_entry || !outbox->buf) {
  2254. err = -EINVAL;
  2255. goto out;
  2256. }
  2257. memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
  2258. err = 0;
  2259. } else if (mpt->com.from_state == RES_MPT_HW) {
  2260. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2261. } else {
  2262. err = -EBUSY;
  2263. goto out;
  2264. }
  2265. out:
  2266. put_res(dev, slave, id, RES_MPT);
  2267. return err;
  2268. }
  2269. static int qp_get_rcqn(struct mlx4_qp_context *qpc)
  2270. {
  2271. return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
  2272. }
  2273. static int qp_get_scqn(struct mlx4_qp_context *qpc)
  2274. {
  2275. return be32_to_cpu(qpc->cqn_send) & 0xffffff;
  2276. }
  2277. static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
  2278. {
  2279. return be32_to_cpu(qpc->srqn) & 0x1ffffff;
  2280. }
  2281. static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
  2282. struct mlx4_qp_context *context)
  2283. {
  2284. u32 qpn = vhcr->in_modifier & 0xffffff;
  2285. u32 qkey = 0;
  2286. if (mlx4_get_parav_qkey(dev, qpn, &qkey))
  2287. return;
  2288. /* adjust qkey in qp context */
  2289. context->qkey = cpu_to_be32(qkey);
  2290. }
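/*
 * RST2INIT on behalf of a slave: move the QP to RES_QP_HW and take
 * references on the MTT, receive/send CQs and optional SRQ named in
 * the QP context before forwarding the command, unwinding everything
 * on failure.
 */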
  2291. int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
  2292. struct mlx4_vhcr *vhcr,
  2293. struct mlx4_cmd_mailbox *inbox,
  2294. struct mlx4_cmd_mailbox *outbox,
  2295. struct mlx4_cmd_info *cmd)
  2296. {
  2297. int err;
  2298. int qpn = vhcr->in_modifier & 0x7fffff;
  2299. struct res_mtt *mtt;
  2300. struct res_qp *qp;
  2301. struct mlx4_qp_context *qpc = inbox->buf + 8;
  2302. int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
  2303. int mtt_size = qp_get_mtt_size(qpc);
  2304. struct res_cq *rcq;
  2305. struct res_cq *scq;
  2306. int rcqn = qp_get_rcqn(qpc);
  2307. int scqn = qp_get_scqn(qpc);
  2308. u32 srqn = qp_get_srqn(qpc) & 0xffffff;
  2309. int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
  2310. struct res_srq *srq;
  2311. int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
  2312. err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
  2313. if (err)
  2314. return err;
  2315. qp->local_qpn = local_qpn;
  2316. qp->sched_queue = 0;
  2317. qp->param3 = 0;
  2318. qp->vlan_control = 0;
  2319. qp->fvl_rx = 0;
  2320. qp->pri_path_fl = 0;
  2321. qp->vlan_index = 0;
  2322. qp->feup = 0;
  2323. qp->qpc_flags = be32_to_cpu(qpc->flags);
  2324. err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
  2325. if (err)
  2326. goto ex_abort;
  2327. err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
  2328. if (err)
  2329. goto ex_put_mtt;
  2330. err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
  2331. if (err)
  2332. goto ex_put_mtt;
  2333. if (scqn != rcqn) {
  2334. err = get_res(dev, slave, scqn, RES_CQ, &scq);
  2335. if (err)
  2336. goto ex_put_rcq;
  2337. } else
  2338. scq = rcq;
  2339. if (use_srq) {
  2340. err = get_res(dev, slave, srqn, RES_SRQ, &srq);
  2341. if (err)
  2342. goto ex_put_scq;
  2343. }
  2344. adjust_proxy_tun_qkey(dev, vhcr, qpc);
  2345. update_pkey_index(dev, slave, inbox);
  2346. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2347. if (err)
  2348. goto ex_put_srq;
  2349. atomic_inc(&mtt->ref_count);
  2350. qp->mtt = mtt;
  2351. atomic_inc(&rcq->ref_count);
  2352. qp->rcq = rcq;
  2353. atomic_inc(&scq->ref_count);
  2354. qp->scq = scq;
  2355. if (scqn != rcqn)
  2356. put_res(dev, slave, scqn, RES_CQ);
  2357. if (use_srq) {
  2358. atomic_inc(&srq->ref_count);
  2359. put_res(dev, slave, srqn, RES_SRQ);
  2360. qp->srq = srq;
  2361. }
  2362. put_res(dev, slave, rcqn, RES_CQ);
  2363. put_res(dev, slave, mtt_base, RES_MTT);
  2364. res_end_move(dev, slave, RES_QP, qpn);
  2365. return 0;
  2366. ex_put_srq:
  2367. if (use_srq)
  2368. put_res(dev, slave, srqn, RES_SRQ);
  2369. ex_put_scq:
  2370. if (scqn != rcqn)
  2371. put_res(dev, slave, scqn, RES_CQ);
  2372. ex_put_rcq:
  2373. put_res(dev, slave, rcqn, RES_CQ);
  2374. ex_put_mtt:
  2375. put_res(dev, slave, mtt_base, RES_MTT);
  2376. ex_abort:
  2377. res_abort_move(dev, slave, RES_QP, qpn);
  2378. return err;
  2379. }
  2380. static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
  2381. {
  2382. return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
  2383. }
  2384. static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
  2385. {
  2386. int log_eq_size = eqc->log_eq_size & 0x1f;
  2387. int page_shift = (eqc->log_page_size & 0x3f) + 12;
  2388. if (log_eq_size + 5 < page_shift)
  2389. return 1;
  2390. return 1 << (log_eq_size + 5 - page_shift);
  2391. }
  2392. static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
  2393. {
  2394. return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
  2395. }
  2396. static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
  2397. {
  2398. int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
  2399. int page_shift = (cqc->log_page_size & 0x3f) + 12;
  2400. if (log_cq_size + 5 < page_shift)
  2401. return 1;
  2402. return 1 << (log_cq_size + 5 - page_shift);
  2403. }
  2404. int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
  2405. struct mlx4_vhcr *vhcr,
  2406. struct mlx4_cmd_mailbox *inbox,
  2407. struct mlx4_cmd_mailbox *outbox,
  2408. struct mlx4_cmd_info *cmd)
  2409. {
  2410. int err;
  2411. int eqn = vhcr->in_modifier;
  2412. int res_id = (slave << 8) | eqn;
  2413. struct mlx4_eq_context *eqc = inbox->buf;
  2414. int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
  2415. int mtt_size = eq_get_mtt_size(eqc);
  2416. struct res_eq *eq;
  2417. struct res_mtt *mtt;
  2418. err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
  2419. if (err)
  2420. return err;
  2421. err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
  2422. if (err)
  2423. goto out_add;
  2424. err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
  2425. if (err)
  2426. goto out_move;
  2427. err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
  2428. if (err)
  2429. goto out_put;
  2430. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2431. if (err)
  2432. goto out_put;
  2433. atomic_inc(&mtt->ref_count);
  2434. eq->mtt = mtt;
  2435. put_res(dev, slave, mtt->com.res_id, RES_MTT);
  2436. res_end_move(dev, slave, RES_EQ, res_id);
  2437. return 0;
  2438. out_put:
  2439. put_res(dev, slave, mtt->com.res_id, RES_MTT);
  2440. out_move:
  2441. res_abort_move(dev, slave, RES_EQ, res_id);
  2442. out_add:
  2443. rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
  2444. return err;
  2445. }
  2446. int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
  2447. struct mlx4_vhcr *vhcr,
  2448. struct mlx4_cmd_mailbox *inbox,
  2449. struct mlx4_cmd_mailbox *outbox,
  2450. struct mlx4_cmd_info *cmd)
  2451. {
  2452. int err;
  2453. u8 get = vhcr->op_modifier;
  2454. if (get != 1)
  2455. return -EPERM;
  2456. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2457. return err;
  2458. }
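/*
 * Find the slave's MTT reservation that covers [start, start + len)
 * and mark it busy while the caller writes into it.
 */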
  2459. static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
  2460. int len, struct res_mtt **res)
  2461. {
  2462. struct mlx4_priv *priv = mlx4_priv(dev);
  2463. struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
  2464. struct res_mtt *mtt;
  2465. int err = -EINVAL;
  2466. spin_lock_irq(mlx4_tlock(dev));
  2467. list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
  2468. com.list) {
  2469. if (!check_mtt_range(dev, slave, start, len, mtt)) {
  2470. *res = mtt;
  2471. mtt->com.from_state = mtt->com.state;
  2472. mtt->com.state = RES_MTT_BUSY;
  2473. err = 0;
  2474. break;
  2475. }
  2476. }
  2477. spin_unlock_irq(mlx4_tlock(dev));
  2478. return err;
  2479. }
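/*
 * Sanity-check a slave's QP modify mailbox: primary and alternate path
 * GID indexes must fall within the GIDs assigned to the slave, and
 * only VFs with SMI enabled may bring up MLX proxy special QPs.
 */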
  2480. static int verify_qp_parameters(struct mlx4_dev *dev,
  2481. struct mlx4_vhcr *vhcr,
  2482. struct mlx4_cmd_mailbox *inbox,
  2483. enum qp_transition transition, u8 slave)
  2484. {
  2485. u32 qp_type;
  2486. u32 qpn;
  2487. struct mlx4_qp_context *qp_ctx;
  2488. enum mlx4_qp_optpar optpar;
  2489. int port;
  2490. int num_gids;
  2491. qp_ctx = inbox->buf + 8;
  2492. qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
  2493. optpar = be32_to_cpu(*(__be32 *) inbox->buf);
  2494. switch (qp_type) {
  2495. case MLX4_QP_ST_RC:
  2496. case MLX4_QP_ST_XRC:
  2497. case MLX4_QP_ST_UC:
  2498. switch (transition) {
  2499. case QP_TRANS_INIT2RTR:
  2500. case QP_TRANS_RTR2RTS:
  2501. case QP_TRANS_RTS2RTS:
  2502. case QP_TRANS_SQD2SQD:
  2503. case QP_TRANS_SQD2RTS:
  2504. if (slave != mlx4_master_func_num(dev))
  2505. if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
  2506. port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
  2507. if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
  2508. num_gids = mlx4_get_slave_num_gids(dev, slave, port);
  2509. else
  2510. num_gids = 1;
  2511. if (qp_ctx->pri_path.mgid_index >= num_gids)
  2512. return -EINVAL;
  2513. }
  2514. if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
  2515. port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
  2516. if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
  2517. num_gids = mlx4_get_slave_num_gids(dev, slave, port);
  2518. else
  2519. num_gids = 1;
  2520. if (qp_ctx->alt_path.mgid_index >= num_gids)
  2521. return -EINVAL;
  2522. }
  2523. break;
  2524. default:
  2525. break;
  2526. }
  2527. break;
  2528. case MLX4_QP_ST_MLX:
  2529. qpn = vhcr->in_modifier & 0x7fffff;
  2530. port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
  2531. if (transition == QP_TRANS_INIT2RTR &&
  2532. slave != mlx4_master_func_num(dev) &&
  2533. mlx4_is_qp_reserved(dev, qpn) &&
  2534. !mlx4_vf_smi_enabled(dev, slave, port)) {
  2535. /* only enabled VFs may create MLX proxy QPs */
  2536. mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
  2537. __func__, slave, port);
  2538. return -EPERM;
  2539. }
  2540. break;
  2541. default:
  2542. break;
  2543. }
  2544. return 0;
  2545. }
  2546. int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
  2547. struct mlx4_vhcr *vhcr,
  2548. struct mlx4_cmd_mailbox *inbox,
  2549. struct mlx4_cmd_mailbox *outbox,
  2550. struct mlx4_cmd_info *cmd)
  2551. {
  2552. struct mlx4_mtt mtt;
  2553. __be64 *page_list = inbox->buf;
  2554. u64 *pg_list = (u64 *)page_list;
  2555. int i;
  2556. struct res_mtt *rmtt = NULL;
  2557. int start = be64_to_cpu(page_list[0]);
  2558. int npages = vhcr->in_modifier;
  2559. int err;
  2560. err = get_containing_mtt(dev, slave, start, npages, &rmtt);
  2561. if (err)
  2562. return err;
  2563. /* Call the SW implementation of write_mtt:
  2564. * - Prepare a dummy mtt struct
2565. * - Translate inbox contents to simple addresses in host endianness */
2566. mtt.offset = 0; /* TBD: this is broken, but we don't handle it since
2567. * we don't really use it */
  2568. mtt.order = 0;
  2569. mtt.page_shift = 0;
  2570. for (i = 0; i < npages; ++i)
  2571. pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
  2572. err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
  2573. ((u64 *)page_list + 2));
  2574. if (rmtt)
  2575. put_res(dev, slave, rmtt->com.res_id, RES_MTT);
  2576. return err;
  2577. }
  2578. int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
  2579. struct mlx4_vhcr *vhcr,
  2580. struct mlx4_cmd_mailbox *inbox,
  2581. struct mlx4_cmd_mailbox *outbox,
  2582. struct mlx4_cmd_info *cmd)
  2583. {
  2584. int eqn = vhcr->in_modifier;
  2585. int res_id = eqn | (slave << 8);
  2586. struct res_eq *eq;
  2587. int err;
  2588. err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
  2589. if (err)
  2590. return err;
  2591. err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
  2592. if (err)
  2593. goto ex_abort;
  2594. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2595. if (err)
  2596. goto ex_put;
  2597. atomic_dec(&eq->mtt->ref_count);
  2598. put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
  2599. res_end_move(dev, slave, RES_EQ, res_id);
  2600. rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
  2601. return 0;
  2602. ex_put:
  2603. put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
  2604. ex_abort:
  2605. res_abort_move(dev, slave, RES_EQ, res_id);
  2606. return err;
  2607. }
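/*
 * Inject an event into a slave's event EQ.  The event is delivered
 * only if the slave registered an EQ for this event type and that EQ
 * is in HW state; the EQE is copied into a mailbox and issued as a
 * native GEN_EQE command, serialized per slave.
 */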
  2608. int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
  2609. {
  2610. struct mlx4_priv *priv = mlx4_priv(dev);
  2611. struct mlx4_slave_event_eq_info *event_eq;
  2612. struct mlx4_cmd_mailbox *mailbox;
  2613. u32 in_modifier = 0;
  2614. int err;
  2615. int res_id;
  2616. struct res_eq *req;
  2617. if (!priv->mfunc.master.slave_state)
  2618. return -EINVAL;
  2619. event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
  2620. /* Create the event only if the slave is registered */
  2621. if (event_eq->eqn < 0)
  2622. return 0;
  2623. mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
  2624. res_id = (slave << 8) | event_eq->eqn;
  2625. err = get_res(dev, slave, res_id, RES_EQ, &req);
  2626. if (err)
  2627. goto unlock;
  2628. if (req->com.from_state != RES_EQ_HW) {
  2629. err = -EINVAL;
  2630. goto put;
  2631. }
  2632. mailbox = mlx4_alloc_cmd_mailbox(dev);
  2633. if (IS_ERR(mailbox)) {
  2634. err = PTR_ERR(mailbox);
  2635. goto put;
  2636. }
  2637. if (eqe->type == MLX4_EVENT_TYPE_CMD) {
  2638. ++event_eq->token;
  2639. eqe->event.cmd.token = cpu_to_be16(event_eq->token);
  2640. }
  2641. memcpy(mailbox->buf, (u8 *) eqe, 28);
  2642. in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
  2643. err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
  2644. MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
  2645. MLX4_CMD_NATIVE);
  2646. put_res(dev, slave, res_id, RES_EQ);
  2647. mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
  2648. mlx4_free_cmd_mailbox(dev, mailbox);
  2649. return err;
  2650. put:
  2651. put_res(dev, slave, res_id, RES_EQ);
  2652. unlock:
  2653. mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
  2654. return err;
  2655. }
  2656. int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
  2657. struct mlx4_vhcr *vhcr,
  2658. struct mlx4_cmd_mailbox *inbox,
  2659. struct mlx4_cmd_mailbox *outbox,
  2660. struct mlx4_cmd_info *cmd)
  2661. {
  2662. int eqn = vhcr->in_modifier;
  2663. int res_id = eqn | (slave << 8);
  2664. struct res_eq *eq;
  2665. int err;
  2666. err = get_res(dev, slave, res_id, RES_EQ, &eq);
  2667. if (err)
  2668. return err;
  2669. if (eq->com.from_state != RES_EQ_HW) {
  2670. err = -EINVAL;
  2671. goto ex_put;
  2672. }
  2673. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2674. ex_put:
  2675. put_res(dev, slave, res_id, RES_EQ);
  2676. return err;
  2677. }
  2678. int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
  2679. struct mlx4_vhcr *vhcr,
  2680. struct mlx4_cmd_mailbox *inbox,
  2681. struct mlx4_cmd_mailbox *outbox,
  2682. struct mlx4_cmd_info *cmd)
  2683. {
  2684. int err;
  2685. int cqn = vhcr->in_modifier;
  2686. struct mlx4_cq_context *cqc = inbox->buf;
  2687. int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
  2688. struct res_cq *cq;
  2689. struct res_mtt *mtt;
  2690. err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
  2691. if (err)
  2692. return err;
  2693. err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
  2694. if (err)
  2695. goto out_move;
  2696. err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
  2697. if (err)
  2698. goto out_put;
  2699. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2700. if (err)
  2701. goto out_put;
  2702. atomic_inc(&mtt->ref_count);
  2703. cq->mtt = mtt;
  2704. put_res(dev, slave, mtt->com.res_id, RES_MTT);
  2705. res_end_move(dev, slave, RES_CQ, cqn);
  2706. return 0;
  2707. out_put:
  2708. put_res(dev, slave, mtt->com.res_id, RES_MTT);
  2709. out_move:
  2710. res_abort_move(dev, slave, RES_CQ, cqn);
  2711. return err;
  2712. }
  2713. int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
  2714. struct mlx4_vhcr *vhcr,
  2715. struct mlx4_cmd_mailbox *inbox,
  2716. struct mlx4_cmd_mailbox *outbox,
  2717. struct mlx4_cmd_info *cmd)
  2718. {
  2719. int err;
  2720. int cqn = vhcr->in_modifier;
  2721. struct res_cq *cq;
  2722. err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
  2723. if (err)
  2724. return err;
  2725. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2726. if (err)
  2727. goto out_move;
  2728. atomic_dec(&cq->mtt->ref_count);
  2729. res_end_move(dev, slave, RES_CQ, cqn);
  2730. return 0;
  2731. out_move:
  2732. res_abort_move(dev, slave, RES_CQ, cqn);
  2733. return err;
  2734. }
  2735. int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
  2736. struct mlx4_vhcr *vhcr,
  2737. struct mlx4_cmd_mailbox *inbox,
  2738. struct mlx4_cmd_mailbox *outbox,
  2739. struct mlx4_cmd_info *cmd)
  2740. {
  2741. int cqn = vhcr->in_modifier;
  2742. struct res_cq *cq;
  2743. int err;
  2744. err = get_res(dev, slave, cqn, RES_CQ, &cq);
  2745. if (err)
  2746. return err;
  2747. if (cq->com.from_state != RES_CQ_HW)
  2748. goto ex_put;
  2749. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2750. ex_put:
  2751. put_res(dev, slave, cqn, RES_CQ);
  2752. return err;
  2753. }
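/*
 * MODIFY_CQ with op_modifier 0 resizes the CQ: once firmware accepts
 * the command, swap the CQ's MTT reference from the original buffer to
 * the newly supplied one.
 */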
  2754. static int handle_resize(struct mlx4_dev *dev, int slave,
  2755. struct mlx4_vhcr *vhcr,
  2756. struct mlx4_cmd_mailbox *inbox,
  2757. struct mlx4_cmd_mailbox *outbox,
  2758. struct mlx4_cmd_info *cmd,
  2759. struct res_cq *cq)
  2760. {
  2761. int err;
  2762. struct res_mtt *orig_mtt;
  2763. struct res_mtt *mtt;
  2764. struct mlx4_cq_context *cqc = inbox->buf;
  2765. int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
  2766. err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
  2767. if (err)
  2768. return err;
  2769. if (orig_mtt != cq->mtt) {
  2770. err = -EINVAL;
  2771. goto ex_put;
  2772. }
  2773. err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
  2774. if (err)
  2775. goto ex_put;
  2776. err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
  2777. if (err)
  2778. goto ex_put1;
  2779. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2780. if (err)
  2781. goto ex_put1;
  2782. atomic_dec(&orig_mtt->ref_count);
  2783. put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
  2784. atomic_inc(&mtt->ref_count);
  2785. cq->mtt = mtt;
  2786. put_res(dev, slave, mtt->com.res_id, RES_MTT);
  2787. return 0;
  2788. ex_put1:
  2789. put_res(dev, slave, mtt->com.res_id, RES_MTT);
  2790. ex_put:
  2791. put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
  2792. return err;
  2793. }
  2794. int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
  2795. struct mlx4_vhcr *vhcr,
  2796. struct mlx4_cmd_mailbox *inbox,
  2797. struct mlx4_cmd_mailbox *outbox,
  2798. struct mlx4_cmd_info *cmd)
  2799. {
  2800. int cqn = vhcr->in_modifier;
  2801. struct res_cq *cq;
  2802. int err;
  2803. err = get_res(dev, slave, cqn, RES_CQ, &cq);
  2804. if (err)
  2805. return err;
  2806. if (cq->com.from_state != RES_CQ_HW)
  2807. goto ex_put;
  2808. if (vhcr->op_modifier == 0) {
  2809. err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
  2810. goto ex_put;
  2811. }
  2812. err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
  2813. ex_put:
  2814. put_res(dev, slave, cqn, RES_CQ);
  2815. return err;
  2816. }
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}

int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
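
/*
 * Bit 6 of sched_queue selects the physical port.  Remap the port number
 * the VF used to the real port assigned to it, for both the primary and
 * (when the optpar marks it valid) the alternate path.
 */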
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox)
{
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
	u8 pri_sched_queue;
	int port = mlx4_slave_convert_port(
		   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;

	if (port < 0)
		return -EINVAL;

	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
			  ((port & 1) << 6);

	if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
	    mlx4_is_eth(dev, port + 1)) {
		qpc->pri_path.sched_queue = pri_sched_queue;
	}

	if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
		port = mlx4_slave_convert_port(
				dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
				+ 1) - 1;
		if (port < 0)
			return -EINVAL;
		qpc->alt_path.sched_queue =
			(qpc->alt_path.sched_queue & ~(1 << 6)) |
			(port & 1) << 6;
	}
	return 0;
}
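
/*
 * For Ethernet QPs (other than the MLX transport), the source MAC index
 * in the QP context must refer to a MAC that is actually registered to
 * this slave on that port.
 */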
static int roce_verify_mac(struct mlx4_dev *dev, int slave,
			   struct mlx4_qp_context *qpc,
			   struct mlx4_cmd_mailbox *inbox)
{
	u64 mac;
	int port;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 smac_ix;

	port = (sched >> 6 & 1) + 1;
	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
			return -ENOENT;
	}
	return 0;
}
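
/*
 * INIT2RTR is where the QP context is paravirtualized: the pkey index,
 * GID index, proxy/tunnel qkey and per-port scheduling queue are
 * rewritten, and the VLAN/QoS fields the VF asked for are saved so they
 * can be restored on a later VST->VGT change.
 */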
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;
	u8 orig_sched_queue;
	__be32 orig_param3 = qpc->param3;
	u8 orig_vlan_control = qpc->pri_path.vlan_control;
	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
	u8 orig_pri_path_fl = qpc->pri_path.fl;
	u8 orig_vlan_index = qpc->pri_path.vlan_index;
	u8 orig_feup = qpc->pri_path.feup;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	if (roce_verify_mac(dev, slave, qpc, inbox))
		return -EINVAL;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;
	err = update_vport_qp_param(dev, inbox, slave, qpn);
	if (err)
		return err;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	/* if no error, save sched queue value passed in by VF. This is
	 * essentially the QOS value provided by the VF. This will be useful
	 * if we allow dynamic changes from VST back to VGT
	 */
	if (!err) {
		qp->sched_queue = orig_sched_queue;
		qp->param3 = orig_param3;
		qp->vlan_control = orig_vlan_control;
		qp->fvl_rx = orig_fvl_rx;
		qp->pri_path_fl = orig_pri_path_fl;
		qp->vlan_index = orig_vlan_index;
		qp->feup = orig_feup;
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	int err = adjust_qp_sched_queue(dev, slave, context, inbox);

	if (err)
		return err;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);
	return err;
}

static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		res->reg_id = reg_id;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		*reg_id = res->reg_id;
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
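
/*
 * Attach/detach a QP to a multicast group or flow on behalf of a VF,
 * dispatching on the device's steering mode: device-managed flow
 * steering goes through the DMFS attach/detach API, B0 steering through
 * the common QP attach path (with the gid's port byte remapped for
 * Ethernet).
 */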
static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
		     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		int port = mlx4_slave_convert_port(dev, slave, gid[5]);
		if (port < 0)
			return port;
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_loopback, prot,
						 reg_id);
	}
	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH) {
			int port = mlx4_slave_convert_port(dev, slave, gid[5]);
			if (port < 0)
				return port;
			gid[5] = port;
		}
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
	default:
		return -EINVAL;
	}
}

static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
		     u8 gid[16], enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
	default:
		return -EINVAL;
	}
}

static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
			    u8 *gid, enum mlx4_protocol prot)
{
	int real_port;

	if (prot != MLX4_PROT_ETH)
		return 0;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
		if (real_port < 0)
			return -EINVAL;
		gid[5] = real_port;
	}

	return 0;
}
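
/*
 * QP_ATTACH/DETACH wrapper: op_modifier selects attach vs. detach.  On
 * attach, the registration id returned by the steering layer is recorded
 * against the QP so detach_qp() can undo it when the slave goes away; on
 * detach, the recorded id is looked up and released.
 */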
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	u64 reg_id = 0;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
			goto ex_put;
		}
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
		if (err)
			goto ex_detach;
	} else {
		err = mlx4_adjust_port(dev, slave, gid, prot);
		if (err)
			goto ex_put;

		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;

		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, reg_id);
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_detach:
	qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

/*
 * MAC validation for Flow Steering rules.
 * VF can attach rules only with a mac address which is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't multicast or broadcast mac */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}

/*
 * In case of missing eth header, append eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}
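
/*
 * UPDATE_QP from a VF may only change the source MAC index of the
 * primary path (MLX4_UPD_QP_PATH_MASK_SUPPORTED); any other mask bit is
 * rejected, and the requested index is checked against the MACs
 * registered to the slave on that port before the command is forwarded
 * to the firmware.
 */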
#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd_info)
{
	int err;
	u32 qpn = vhcr->in_modifier & 0xffffff;
	struct res_qp *rqp;
	u64 mac;
	unsigned port;
	u64 pri_addr_path_mask;
	struct mlx4_update_qp_context *cmd;
	int smac_index;

	cmd = (struct mlx4_update_qp_context *)inbox->buf;

	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
		return -EPERM;

	/* Just change the smac for the QP */
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
		return err;
	}

	port = (rqp->sched_queue >> 6 & 1) + 1;

	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
		smac_index = cmd->qp_context.pri_path.grh_mylmc;
		err = mac_find_smac_ix_in_slave(dev, slave, port,
						smac_index, &mac);
		if (err) {
			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
				 qpn, smac_index);
			goto err_mac;
		}
	}

	err = mlx4_cmd(dev, inbox->dma,
		       vhcr->in_modifier, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err) {
		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
		goto err_mac;
	}

err_mac:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
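
/*
 * Flow steering attach on behalf of a VF (device-managed steering only):
 * remap the port, validate that an Ethernet header carrying one of the
 * VF's own MACs is present (adding one if the rule lacks L2 headers),
 * run the command, and track the returned rule id as an RES_FS_RULE
 * owned by the slave.
 */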
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
	if (ctrl->port <= 0)
		return -EINVAL;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put;
		}
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		err = -EINVAL;
		goto err_put;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Fail to add flow steering resources\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
		goto err_put;
	}
	atomic_inc(&rqp->ref_count);
err_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct res_qp *rqp;
	struct res_fs_rule *rrule;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
	if (err)
		return err;
	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
	if (err)
		return err;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Fail to remove flow steering resources\n");
		goto out;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (!err)
		atomic_dec(&rqp->ref_count);
out:
	put_res(dev, slave, rrule->qpn, RES_QP);
	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
		kfree(rgid);
	}
}
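
/*
 * Mark every resource of the given type owned by the slave as busy and
 * "removing", so no new command can grab it while cleanup proceeds.
 * move_all_busy() retries for up to five seconds, then reports (with
 * debug output) whatever is still stuck in the busy state.
 */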
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 resource_str(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
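
/*
 * The rem_slave_*() helpers below tear down one resource type for a
 * dying or reset slave.  Each one first moves the slave's resources to
 * the busy state, then walks its list and unwinds every resource from
 * its current state back to "free", issuing the matching HW2SW/2RST
 * command and dropping reference counts along the way.
 */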
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					if (!valid_reserved(dev, slave, qpn)) {
						__mlx4_qp_release_range(dev, qpn, 1);
						mlx4_release_resource(dev, slave,
								      RES_QP, 1, 0);
					}
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	LIST_HEAD(tlist);
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
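
/*
 * Deferred work that applies an immediate VLAN/QoS change (VST<->VGT)
 * for one VF: for every eligible QP owned by the slave on the affected
 * port, send UPDATE_QP with either the VF's saved context (VGT) or the
 * new vlan index, QoS and vlan-control settings (VST), then release the
 * previously registered VLAN if everything succeeded.
 */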
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||	/* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}